entry_point | original_triton_code | python_code | triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link | pytorch_code
---|---|---|---|---|---|---|---|---|---|---|---|---
stringlengths 1–65 | stringlengths 4.5k–619k | stringlengths 208–60.9k | stringlengths 1.15k–275k | stringlengths 7–115 | stringlengths 1–65 | bool (1 class) | int64 0–18.5k | sequencelengths 1–6 | int64 0–19.8k | stringlengths 40–40 | stringlengths 72–180 | stringlengths 200–4.05k
scSEmodule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/eb/cebpbupczy3a7z6yffgxybumq5trdt3jp5hxwuoo6w6cunzz7d7h.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jd/cjdgjytdmb4vnqondclaeabo24zlhmig222taliklzkzh66d2idv.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_8 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/us/cusd6yi6tl4wtmzejkdicqfu5rgcipisnhvjiy672xjqihjbzc43.py
# Topologically Sorted Source Nodes: [x_7, x_9, x_10, max_1], Original ATen: [aten.mul, aten.sigmoid, aten.maximum]
# Source node to ATen node mapping:
# max_1 => maximum
# x_10 => mul_1
# x_7 => mul
# x_9 => sigmoid_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %primals_1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_1), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_maximum_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_maximum_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 16)
x4 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x4), xmask)
tmp4 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_3, 8, grid=grid(8), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_5
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_9, x_10, max_1], Original ATen: [aten.mul, aten.sigmoid, aten.maximum]
triton_poi_fused_maximum_mul_sigmoid_3.run(buf4, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, primals_1, primals_6, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, buf4, buf6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class cSEmodule(nn.Module):
""" SpatialSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.global_avg = nn.AdaptiveAvgPool2d(1)
self.flatten = nn.Flatten()
self.down_linear = nn.Linear(in_channel, in_channel // 2)
self.up_linear = nn.Linear(in_channel // 2, in_channel)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, _h, _w = x.shape
skip_connection = x
x = self.global_avg(x)
x = self.flatten(x)
x = self.down_linear(x)
x = self.relu(x)
x = self.up_linear(x)
x = self.sigmoid(x)
x = x.reshape(b, c, 1, 1)
x = x * skip_connection
return x
class sSEmodule(nn.Module):
""" ChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.conv2d = nn.Conv2d(in_channel, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
skip_connection = x
x = self.conv2d(x)
x = self.sigmoid(x)
x = x * skip_connection
return x
class scSEmodule(nn.Module):
""" ConcurrentSpatialChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.cSEmodule = cSEmodule(in_channel=in_channel)
self.sSEmodule = sSEmodule(in_channel=in_channel)
def forward(self, x):
cse_branch = self.cSEmodule(x)
sse_branch = self.sSEmodule(x)
return torch.max(cse_branch, sse_branch)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_maximum_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x4 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x4, xmask)
tmp4 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(out_ptr0 + x4, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(64)](buf6, primals_7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_maximum_mul_sigmoid_3[grid(256)](buf4, primals_1,
buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf7, primals_1, primals_6, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), buf3, buf4, buf6, primals_4
class cSEmodule(nn.Module):
""" SpatialSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.global_avg = nn.AdaptiveAvgPool2d(1)
self.flatten = nn.Flatten()
self.down_linear = nn.Linear(in_channel, in_channel // 2)
self.up_linear = nn.Linear(in_channel // 2, in_channel)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, _h, _w = x.shape
skip_connection = x
x = self.global_avg(x)
x = self.flatten(x)
x = self.down_linear(x)
x = self.relu(x)
x = self.up_linear(x)
x = self.sigmoid(x)
x = x.reshape(b, c, 1, 1)
x = x * skip_connection
return x
class sSEmodule(nn.Module):
""" ChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.conv2d = nn.Conv2d(in_channel, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
skip_connection = x
x = self.conv2d(x)
x = self.sigmoid(x)
x = x * skip_connection
return x
class scSEmoduleNew(nn.Module):
""" ConcurrentSpatialChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.cSEmodule = cSEmodule(in_channel=in_channel)
self.sSEmodule = sSEmodule(in_channel=in_channel)
def forward(self, input_0):
primals_2 = self.cSEmodule.down_linear.weight
primals_3 = self.cSEmodule.down_linear.bias
primals_4 = self.cSEmodule.up_linear.weight
primals_5 = self.cSEmodule.up_linear.bias
primals_6 = self.sSEmodule.conv2d.weight
primals_7 = self.sSEmodule.conv2d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| HwangJohn/feature_representation | scSEmodule | false | 2,358 | ["MIT"] | 0 | 27389caacc9c026b65f47ab0cbb4e6d0465e6a60 | https://github.com/HwangJohn/feature_representation/tree/27389caacc9c026b65f47ab0cbb4e6d0465e6a60 | import torch
import torch.nn as nn
class cSEmodule(nn.Module):
""" SpatialSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.global_avg = nn.AdaptiveAvgPool2d(1)
self.flatten = nn.Flatten()
self.down_linear = nn.Linear(in_channel, in_channel // 2)
self.up_linear = nn.Linear(in_channel // 2, in_channel)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, _h, _w = x.shape
skip_connection = x
x = self.global_avg(x)
x = self.flatten(x)
x = self.down_linear(x)
x = self.relu(x)
x = self.up_linear(x)
x = self.sigmoid(x)
x = x.reshape(b, c, 1, 1)
x = x * skip_connection
return x
class sSEmodule(nn.Module):
""" ChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.conv2d = nn.Conv2d(in_channel, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
skip_connection = x
x = self.conv2d(x)
x = self.sigmoid(x)
x = x * skip_connection
return x
class Model(nn.Module):
""" ConcurrentSpatialChannelSequeezeExcitationModule
input: [B, C, H, W] torch tensor
output: [B, C, H, W] torch tensor
"""
def __init__(self, in_channel):
super().__init__()
self.cSEmodule = cSEmodule(in_channel=in_channel)
self.sSEmodule = sSEmodule(in_channel=in_channel)
def forward(self, x):
cse_branch = self.cSEmodule(x)
sse_branch = self.sSEmodule(x)
return torch.max(cse_branch, sse_branch)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
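A quick parity check for the scSEmodule row above: copy the eager module's weights into the Inductor-lowered `scSEmoduleNew` and compare outputs. This is a minimal sketch, assuming a CUDA device (the generated `call()` allocates CUDA buffers on device 0) and that the classes from this row's `python_code` and `triton_code` cells are already in scope:

```python
import torch

eager = scSEmodule(in_channel=4).cuda()
lowered = scSEmoduleNew(in_channel=4).cuda()
# Both wrappers hold the same cSEmodule/sSEmodule submodules, so the
# state-dict keys line up and the two models share identical weights.
lowered.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device="cuda")  # shape and strides from get_inputs()
with torch.no_grad():
    torch.testing.assert_close(eager(x), lowered(x), rtol=1e-4, atol=1e-4)
```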
Hsigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| IgorDavidyuk/pytorch-mobilenet-v3 | Hsigmoid | false | 2,359 | ["Apache-2.0"] | 0 | 48678f80d9390b530cb97966db492cf01d1c4a43 | https://github.com/IgorDavidyuk/pytorch-mobilenet-v3/tree/48678f80d9390b530cb97966db492cf01d1c4a43 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
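The fused kernel in the Hsigmoid row above computes `(x + 3).clamp(0, 6) * (1/6)` in a single pass, trading the division for a multiply by `0.16666666666666666`. A minimal eager-mode sketch of the same identity (no GPU required), checked against PyTorch's built-in hard sigmoid:

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
a = F.relu6(x + 3.0) / 6.0                 # the module's formulation
b = torch.clamp(x + 3.0, 0.0, 6.0) / 6.0   # what the fused kernel evaluates
torch.testing.assert_close(a, b)
torch.testing.assert_close(a, F.hardsigmoid(x))
```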
Hswish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py
# Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HswishNew(nn.Module):
def __init__(self, inplace=True):
super(HswishNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| IgorDavidyuk/pytorch-mobilenet-v3 | Hswish | false | 2,360 | ["Apache-2.0"] | 0 | 48678f80d9390b530cb97966db492cf01d1c4a43 | https://github.com/IgorDavidyuk/pytorch-mobilenet-v3/tree/48678f80d9390b530cb97966db492cf01d1c4a43 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
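Hard swish, in the Hswish row above, follows the same pattern: the generated kernel loads `x` once and evaluates `x * relu6(x + 3) * (1/6)` entirely in registers. A minimal eager check against PyTorch's built-in:

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
torch.testing.assert_close(x * F.relu6(x + 3.0) / 6.0, F.hardswish(x))
```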
ConvTemporalGraphical | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kn/cknjlh5mzsg6tap75kwweiwuidyxeolmhbgzpsrthbqvrieuksvv.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x4 = (xindex // 256)
x5 = (xindex // 16) % 16
x3 = (xindex // 64) % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x5) + (64*x1) + (256*x4)), xmask)
tmp1 = tl.load(in_ptr1 + (x3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x6), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_4, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_3
buf2 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (1, 64, 16), (0, 16, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0), out=buf2)
del buf1
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvTemporalGraphical(nn.Module):
"""The basic module for applying a graph convolution.
Args:
in_channels (int): Number of channels in the input sequence data
out_channels (int): Number of channels produced by the convolution
kernel_size (int): Size of the graph convolving kernel
t_kernel_size (int): Size of the temporal convolving kernel
t_stride (int, optional): Stride of the temporal convolution. Default: 1
t_padding (int, optional): Temporal zero-padding added to both sides of
the input. Default: 0
t_dilation (int, optional): Spacing between temporal kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
- Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self, in_channels, out_channels, kernel_size,
t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(in_channels, out_channels * kernel_size,
kernel_size=(t_kernel_size, 1), padding=(t_padding, 0), stride=
(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
def forward(self, x, A):
assert A.size(0) == self.kernel_size
x = self.conv(x)
n, kc, t, v = x.size()
x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v)
x = torch.einsum('nkctv,kvw->nctw', (x, A))
return x.contiguous(), A
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x4 = xindex // 256
x5 = xindex // 16 % 16
x3 = xindex // 64 % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x5 + 64 * x1 + 256 * x4), xmask)
tmp1 = tl.load(in_ptr1 + (x3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x6, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_4, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](buf0, primals_3, buf1, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf2 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (1, 64, 16), (0, 16, 1),
0), reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0),
out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 16),
(64, 1, 4), 0)
class ConvTemporalGraphicalNew(nn.Module):
"""The basic module for applying a graph convolution.
Args:
in_channels (int): Number of channels in the input sequence data
out_channels (int): Number of channels produced by the convolution
kernel_size (int): Size of the graph convolving kernel
t_kernel_size (int): Size of the temporal convolving kernel
t_stride (int, optional): Stride of the temporal convolution. Default: 1
t_padding (int, optional): Temporal zero-padding added to both sides of
the input. Default: 0
t_dilation (int, optional): Spacing between temporal kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
- Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self, in_channels, out_channels, kernel_size,
t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(in_channels, out_channels * kernel_size,
kernel_size=(t_kernel_size, 1), padding=(t_padding, 0), stride=
(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
def forward(self, input_0, input_1):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
| Hunkzer/mmskeleton | ConvTemporalGraphical | false | 2,361 | ["Apache-2.0"] | 0 | 551e3b4fa01330b23caab5815a40fbd848400b15 | https://github.com/Hunkzer/mmskeleton/tree/551e3b4fa01330b23caab5815a40fbd848400b15 | import torch
import torch.nn as nn
class Model(nn.Module):
"""The basic module for applying a graph convolution.
Args:
in_channels (int): Number of channels in the input sequence data
out_channels (int): Number of channels produced by the convolution
kernel_size (int): Size of the graph convolving kernel
t_kernel_size (int): Size of the temporal convolving kernel
t_stride (int, optional): Stride of the temporal convolution. Default: 1
t_padding (int, optional): Temporal zero-padding added to both sides of
the input. Default: 0
t_dilation (int, optional): Spacing between temporal kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
- Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self, in_channels, out_channels, kernel_size,
t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(in_channels, out_channels * kernel_size,
kernel_size=(t_kernel_size, 1), padding=(t_padding, 0), stride=
(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
def forward(self, x, A):
assert A.size(0) == self.kernel_size
x = self.conv(x)
n, kc, t, v = x.size()
x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v)
x = torch.einsum('nkctv,kvw->nctw', (x, A))
return x.contiguous(), A
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
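The `torch.einsum('nkctv,kvw->nctw', ...)` in the ConvTemporalGraphical row above is what Inductor lowers to the clone-plus-`bmm` pair in the generated `call()`: the clone lays the conv output out so that the contraction over `(k, v)` becomes one flattened matrix multiply. A minimal eager sketch of that equivalence, using the shapes from `get_inputs()` (here the output node axis `w` equals `v`):

```python
import torch

n, k, c, t, v = 4, 4, 4, 4, 4
x = torch.rand(n, k, c, t, v)
A = torch.rand(k, v, v)

ref = torch.einsum('nkctv,kvw->nctw', x, A)
# Treat (k, v) as one flattened inner dimension and contract with a matmul.
alt = (x.permute(0, 2, 3, 1, 4).reshape(n * c * t, k * v)
       @ A.reshape(k * v, v)).reshape(n, c, t, v)
torch.testing.assert_close(ref, alt)
```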
L1Part | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
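# Sketch of the eager-mode equivalent of the fused cat kernel above (an
# illustration, not generated code): per output element it selects from the
# first input (channel 0) or the second (channels 1..3), i.e.
#   x = torch.cat([primals_1, primals_2], dim=1)   # (4,1,4,4) + (4,3,4,4) -> (4,4,4,4)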
# kernel path: runs/run_shard_7/inductor_cache/zs/czsfs6ahyese3ubiqpgbbisl635tvyjuqc3erzc4wmskprhkhyxd.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_1 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
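# Sketch of the per-element math fused above (assumed eager equivalent):
#   y = conv_out + bias[c]                              # bias add folded in
#   out = torch.where(y > 0, y, prelu_weight[c] * y)    # per-channel PReLU
# One pointwise pass writes the biased pre-activation back in place (in_out_ptr0)
# and the activated tensor to a second buffer (out_ptr0), so both stay available
# to downstream kernels.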
# kernel path: runs/run_shard_7/inductor_cache/mv/cmvevtdkhwfc2cmmwtwxydyesmlh6ixngyfut335uwjgjx67gl5a.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_5 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
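# Sketch: this kernel is the bias add alone, y[n, c, h, w] += bias[c], done in
# place; the PReLU for this branch is deferred and fused into the concatenation
# kernel that follows (triton_poi_fused_cat_3).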
# kernel path: runs/run_shard_7/inductor_cache/uf/cufgpwyejxyphsulcvmt4uybqyew7btltqaajnqbr2rwjvmglk3r.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 288
x0 = xindex % 16
x2 = (xindex // 4608)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (1536*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 192, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-96) + x1)) + (1536*x2)), tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 288, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (16*((-192) + x1)) + (1536*x2)), tmp11, other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + ((-192) + x1), tmp11, eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + (x3), tmp23, None)
''', device_str='cuda')
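# Assumed eager equivalent of the fused kernel above (a sketch):
#   b3 = torch.where(conv_out > 0, conv_out, prelu_w * conv_out)  # deferred PReLU
#   out = torch.cat([b1, b2, b3], dim=1)                          # 3 * 96 = 288 channels
# Branches 1 and 2 arrive already activated; only the third branch's PReLU is
# applied on the fly while gathering.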
# kernel path: runs/run_shard_7/inductor_cache/e4/ce4avpk4bjkudyp6l55wdtczkmgh7ompm234wiweom24y332sjsz.py
# Topologically Sorted Source Nodes: [input_31, input_32], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_31 => convolution_15
# input_32 => gt_15, mul_15, where_15
# Graph fragment:
# %convolution_15 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_5, %primals_48, %primals_49, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_15 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %convolution_15), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_15), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_4 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ct/cctjop2n3ffrqycno5j5nk2mkyqms6wanugpklbgmz5c76e3ladm.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat_6
# Graph fragment:
# %cat_6 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %convolution_16, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-1) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp9, tmp12, tmp13)
tmp15 = tmp0 >= tmp7
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr3 + (x0 + (16*((-5) + x1)) + (48*x2)), tmp15 & xmask, other=0.0)
tmp19 = tl.where(tmp9, tmp14, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + (x3), tmp20, xmask)
''', device_str='cuda')
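# Sketch (hypothetical eager form): this kernel builds the 8-channel input of
# the second stage as
#   x_1 = torch.cat([primals_1, conv16_out + conv16_bias, primals_2], dim=1)
# i.e. 1 + 4 + 3 channels, with the middle branch's bias addition fused into
# the gather instead of running as a separate pointwise kernel.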
# kernel path: runs/run_shard_7/inductor_cache/p4/cp4a5jlq4cbmfl3sweyw54nzmfyskdqwtjh6fssswkg23w2tniol.py
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_34 => convolution_17
# input_35 => gt_16, mul_16, where_16
# Graph fragment:
# %convolution_17 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_6, %primals_53, %primals_54, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_17, 0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, %convolution_17), kwargs = {})
# %where_16 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_16, %convolution_17, %mul_16), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_6 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hr/chrr2gb2jauwrwwre2wv7evrolayup7ncao4vci4fe2n6xapka32.py
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_38 => convolution_19
# Graph fragment:
# %convolution_19 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_17, %primals_59, %primals_60, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4s/c4sw37jqouiudkv5w7j2ozsgwfxpfhnynj6jzqmcud4d4jdpw63u.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_5 => cat_7
# Graph fragment:
# %cat_7 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_17, %where_18], 1), kwargs = {})
triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 384
x0 = xindex % 16
x2 = (xindex // 6144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (2048*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 256, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-128) + x1)) + (2048*x2)), tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 384, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (16*((-256) + x1)) + (2048*x2)), tmp11, other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + ((-256) + x1), tmp11, eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + (x3), tmp23, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/se/cseugy6yibv2paeqa72icgc2454rfxzhy3lgd3s473bai6dv6nu2.py
# Topologically Sorted Source Nodes: [input_64, input_65], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_64 => convolution_32
# input_65 => gt_31, mul_31, where_31
# Graph fragment:
# %convolution_32 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_11, %primals_98, %primals_99, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_31 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_32, 0), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %convolution_32), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_31, %convolution_32, %mul_31), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_9 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gv/cgvoi43bpqyynthafx6tebrihvw377v4h3qwua5ufjwyy5jej2lw.py
# Topologically Sorted Source Nodes: [input_66], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_66 => convolution_33
# Graph fragment:
# %convolution_33 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_31, %primals_101, %primals_102, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
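# Overview sketch of call() below (an inference from the buffer shapes; names
# follow the primals/buf numbering): stage 1 concatenates the 1- and 3-channel
# inputs, runs five three-conv PReLU blocks at 96 channels with 288-channel
# skip concatenations, then projects through a 1x1 conv (256 ch) down to a
# 4-channel map (buf49). Stage 2 concatenates [primals_1, buf49 + bias,
# primals_2] into 8 channels and repeats the same pattern at 128/384/512
# channels before the final 4-channel 1x1 convolution.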
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (96, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (96, ), (1, ))
assert_size_stride(primals_5, (96, ), (1, ))
assert_size_stride(primals_6, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_7, (96, ), (1, ))
assert_size_stride(primals_8, (96, ), (1, ))
assert_size_stride(primals_9, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_10, (96, ), (1, ))
assert_size_stride(primals_11, (96, ), (1, ))
assert_size_stride(primals_12, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_13, (96, ), (1, ))
assert_size_stride(primals_14, (96, ), (1, ))
assert_size_stride(primals_15, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_16, (96, ), (1, ))
assert_size_stride(primals_17, (96, ), (1, ))
assert_size_stride(primals_18, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_19, (96, ), (1, ))
assert_size_stride(primals_20, (96, ), (1, ))
assert_size_stride(primals_21, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_22, (96, ), (1, ))
assert_size_stride(primals_23, (96, ), (1, ))
assert_size_stride(primals_24, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_25, (96, ), (1, ))
assert_size_stride(primals_26, (96, ), (1, ))
assert_size_stride(primals_27, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_28, (96, ), (1, ))
assert_size_stride(primals_29, (96, ), (1, ))
assert_size_stride(primals_30, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_31, (96, ), (1, ))
assert_size_stride(primals_32, (96, ), (1, ))
assert_size_stride(primals_33, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_34, (96, ), (1, ))
assert_size_stride(primals_35, (96, ), (1, ))
assert_size_stride(primals_36, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_37, (96, ), (1, ))
assert_size_stride(primals_38, (96, ), (1, ))
assert_size_stride(primals_39, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_40, (96, ), (1, ))
assert_size_stride(primals_41, (96, ), (1, ))
assert_size_stride(primals_42, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_43, (96, ), (1, ))
assert_size_stride(primals_44, (96, ), (1, ))
assert_size_stride(primals_45, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_46, (96, ), (1, ))
assert_size_stride(primals_47, (96, ), (1, ))
assert_size_stride(primals_48, (256, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_49, (256, ), (1, ))
assert_size_stride(primals_50, (256, ), (1, ))
assert_size_stride(primals_51, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_52, (4, ), (1, ))
assert_size_stride(primals_53, (128, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_54, (128, ), (1, ))
assert_size_stride(primals_55, (128, ), (1, ))
assert_size_stride(primals_56, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_57, (128, ), (1, ))
assert_size_stride(primals_58, (128, ), (1, ))
assert_size_stride(primals_59, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_60, (128, ), (1, ))
assert_size_stride(primals_61, (128, ), (1, ))
assert_size_stride(primals_62, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_63, (128, ), (1, ))
assert_size_stride(primals_64, (128, ), (1, ))
assert_size_stride(primals_65, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_66, (128, ), (1, ))
assert_size_stride(primals_67, (128, ), (1, ))
assert_size_stride(primals_68, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_69, (128, ), (1, ))
assert_size_stride(primals_70, (128, ), (1, ))
assert_size_stride(primals_71, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_72, (128, ), (1, ))
assert_size_stride(primals_73, (128, ), (1, ))
assert_size_stride(primals_74, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_75, (128, ), (1, ))
assert_size_stride(primals_76, (128, ), (1, ))
assert_size_stride(primals_77, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_78, (128, ), (1, ))
assert_size_stride(primals_79, (128, ), (1, ))
assert_size_stride(primals_80, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_81, (128, ), (1, ))
assert_size_stride(primals_82, (128, ), (1, ))
assert_size_stride(primals_83, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_84, (128, ), (1, ))
assert_size_stride(primals_85, (128, ), (1, ))
assert_size_stride(primals_86, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_87, (128, ), (1, ))
assert_size_stride(primals_88, (128, ), (1, ))
assert_size_stride(primals_89, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_90, (128, ), (1, ))
assert_size_stride(primals_91, (128, ), (1, ))
assert_size_stride(primals_92, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_93, (128, ), (1, ))
assert_size_stride(primals_94, (128, ), (1, ))
assert_size_stride(primals_95, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_96, (128, ), (1, ))
assert_size_stride(primals_97, (128, ), (1, ))
assert_size_stride(primals_98, (512, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_99, (512, ), (1, ))
assert_size_stride(primals_100, (512, ), (1, ))
assert_size_stride(primals_101, (4, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_102, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 96, 4, 4), (1536, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf2, primals_4, primals_5, buf3, 6144, grid=grid(6144), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 96, 4, 4), (1536, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf5, primals_7, primals_8, buf6, 6144, grid=grid(6144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 96, 4, 4), (1536, 16, 4, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_10, 6144, grid=grid(6144), stream=stream0)
del primals_10
buf9 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf3, buf6, buf8, primals_11, buf9, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 4, 4), (1536, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf11, primals_13, primals_14, buf12, 6144, grid=grid(6144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 96, 4, 4), (1536, 16, 4, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_9, input_10], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf14, primals_16, primals_17, buf15, 6144, grid=grid(6144), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 96, 4, 4), (1536, 16, 4, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf17, primals_19, 6144, grid=grid(6144), stream=stream0)
del primals_19
buf18 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf12, buf15, buf17, primals_20, buf18, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 96, 4, 4), (1536, 16, 4, 1))
buf20 = buf19; del buf19 # reuse
buf21 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf20, primals_22, primals_23, buf21, 6144, grid=grid(6144), stream=stream0)
del primals_22
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 96, 4, 4), (1536, 16, 4, 1))
buf23 = buf22; del buf22 # reuse
buf24 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf23, primals_25, primals_26, buf24, 6144, grid=grid(6144), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 96, 4, 4), (1536, 16, 4, 1))
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf26, primals_28, 6144, grid=grid(6144), stream=stream0)
del primals_28
buf27 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf21, buf24, buf26, primals_29, buf27, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 96, 4, 4), (1536, 16, 4, 1))
buf29 = buf28; del buf28 # reuse
buf30 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_19, input_20], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf29, primals_31, primals_32, buf30, 6144, grid=grid(6144), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [input_21], Original ATen: [aten.convolution]
buf31 = extern_kernels.convolution(buf30, primals_33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 96, 4, 4), (1536, 16, 4, 1))
buf32 = buf31; del buf31 # reuse
buf33 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_21, input_22], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf32, primals_34, primals_35, buf33, 6144, grid=grid(6144), stream=stream0)
del primals_34
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 96, 4, 4), (1536, 16, 4, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf35, primals_37, 6144, grid=grid(6144), stream=stream0)
del primals_37
buf36 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf30, buf33, buf35, primals_38, buf36, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_25], Original ATen: [aten.convolution]
buf37 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 96, 4, 4), (1536, 16, 4, 1))
buf38 = buf37; del buf37 # reuse
buf39 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_25, input_26], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf38, primals_40, primals_41, buf39, 6144, grid=grid(6144), stream=stream0)
del primals_40
# Topologically Sorted Source Nodes: [input_27], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 96, 4, 4), (1536, 16, 4, 1))
buf41 = buf40; del buf40 # reuse
buf42 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_27, input_28], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_1.run(buf41, primals_43, primals_44, buf42, 6144, grid=grid(6144), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [input_29], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_45, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 96, 4, 4), (1536, 16, 4, 1))
buf44 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [input_29], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf44, primals_46, 6144, grid=grid(6144), stream=stream0)
del primals_46
buf45 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf39, buf42, buf44, primals_47, buf45, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_31], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_48, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 256, 4, 4), (4096, 16, 4, 1))
buf47 = buf46; del buf46 # reuse
buf48 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_31, input_32], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_4.run(buf47, primals_49, primals_50, buf48, 16384, grid=grid(16384), stream=stream0)
del primals_49
# Topologically Sorted Source Nodes: [input_33], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf48, primals_51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 4, 4, 4), (64, 16, 4, 1))
buf50 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(primals_1, buf49, primals_52, primals_2, buf50, 512, grid=grid(512), stream=stream0)
del buf49
del primals_1
del primals_2
del primals_52
# Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 4, 4), (2048, 16, 4, 1))
buf52 = buf51; del buf51 # reuse
buf53 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf52, primals_54, primals_55, buf53, 8192, grid=grid(8192), stream=stream0)
del primals_54
# Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 128, 4, 4), (2048, 16, 4, 1))
buf55 = buf54; del buf54 # reuse
buf56 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_36, input_37], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf55, primals_57, primals_58, buf56, 8192, grid=grid(8192), stream=stream0)
del primals_57
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 4, 4), (2048, 16, 4, 1))
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf58, primals_60, 8192, grid=grid(8192), stream=stream0)
del primals_60
buf59 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf53, buf56, buf58, primals_61, buf59, 24576, grid=grid(24576), stream=stream0)
# Topologically Sorted Source Nodes: [input_40], Original ATen: [aten.convolution]
buf60 = extern_kernels.convolution(buf59, primals_62, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 128, 4, 4), (2048, 16, 4, 1))
buf61 = buf60; del buf60 # reuse
buf62 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_40, input_41], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf61, primals_63, primals_64, buf62, 8192, grid=grid(8192), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [input_42], Original ATen: [aten.convolution]
buf63 = extern_kernels.convolution(buf62, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 128, 4, 4), (2048, 16, 4, 1))
buf64 = buf63; del buf63 # reuse
buf65 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_42, input_43], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf64, primals_66, primals_67, buf65, 8192, grid=grid(8192), stream=stream0)
del primals_66
# Topologically Sorted Source Nodes: [input_44], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_68, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 4, 4), (2048, 16, 4, 1))
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [input_44], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf67, primals_69, 8192, grid=grid(8192), stream=stream0)
del primals_69
buf68 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf62, buf65, buf67, primals_70, buf68, 24576, grid=grid(24576), stream=stream0)
# Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf68, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 128, 4, 4), (2048, 16, 4, 1))
buf70 = buf69; del buf69 # reuse
buf71 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf70, primals_72, primals_73, buf71, 8192, grid=grid(8192), stream=stream0)
del primals_72
# Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
buf72 = extern_kernels.convolution(buf71, primals_74, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 4, 4), (2048, 16, 4, 1))
buf73 = buf72; del buf72 # reuse
buf74 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf73, primals_75, primals_76, buf74, 8192, grid=grid(8192), stream=stream0)
del primals_75
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
buf75 = extern_kernels.convolution(buf74, primals_77, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf75, (4, 128, 4, 4), (2048, 16, 4, 1))
buf76 = buf75; del buf75 # reuse
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf76, primals_78, 8192, grid=grid(8192), stream=stream0)
del primals_78
buf77 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf71, buf74, buf76, primals_79, buf77, 24576, grid=grid(24576), stream=stream0)
# Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
buf78 = extern_kernels.convolution(buf77, primals_80, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 4, 4), (2048, 16, 4, 1))
buf79 = buf78; del buf78 # reuse
buf80 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf79, primals_81, primals_82, buf80, 8192, grid=grid(8192), stream=stream0)
del primals_81
# Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
buf81 = extern_kernels.convolution(buf80, primals_83, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf81, (4, 128, 4, 4), (2048, 16, 4, 1))
buf82 = buf81; del buf81 # reuse
buf83 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf82, primals_84, primals_85, buf83, 8192, grid=grid(8192), stream=stream0)
del primals_84
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf83, primals_86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 4, 4), (2048, 16, 4, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf85, primals_87, 8192, grid=grid(8192), stream=stream0)
del primals_87
buf86 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf80, buf83, buf85, primals_88, buf86, 24576, grid=grid(24576), stream=stream0)
# Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
buf87 = extern_kernels.convolution(buf86, primals_89, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 128, 4, 4), (2048, 16, 4, 1))
buf88 = buf87; del buf87 # reuse
buf89 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_58, input_59], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf88, primals_90, primals_91, buf89, 8192, grid=grid(8192), stream=stream0)
del primals_90
# Topologically Sorted Source Nodes: [input_60], Original ATen: [aten.convolution]
buf90 = extern_kernels.convolution(buf89, primals_92, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf90, (4, 128, 4, 4), (2048, 16, 4, 1))
buf91 = buf90; del buf90 # reuse
buf92 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_60, input_61], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_6.run(buf91, primals_93, primals_94, buf92, 8192, grid=grid(8192), stream=stream0)
del primals_93
# Topologically Sorted Source Nodes: [input_62], Original ATen: [aten.convolution]
buf93 = extern_kernels.convolution(buf92, primals_95, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 4, 4), (2048, 16, 4, 1))
buf94 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [input_62], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf94, primals_96, 8192, grid=grid(8192), stream=stream0)
del primals_96
buf95 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(buf89, buf92, buf94, primals_97, buf95, 24576, grid=grid(24576), stream=stream0)
# Topologically Sorted Source Nodes: [input_64], Original ATen: [aten.convolution]
buf96 = extern_kernels.convolution(buf95, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 512, 4, 4), (8192, 16, 4, 1))
buf97 = buf96; del buf96 # reuse
buf98 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_64, input_65], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_9.run(buf97, primals_99, primals_100, buf98, 32768, grid=grid(32768), stream=stream0)
del primals_99
# Topologically Sorted Source Nodes: [input_66], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_101, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 4, 4, 4), (64, 16, 4, 1))
buf100 = buf99; del buf99 # reuse
# Topologically Sorted Source Nodes: [input_66], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf100, primals_102, 256, grid=grid(256), stream=stream0)
del primals_102
return (buf100, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_55, primals_56, primals_58, primals_59, primals_61, primals_62, primals_64, primals_65, primals_67, primals_68, primals_70, primals_71, primals_73, primals_74, primals_76, primals_77, primals_79, primals_80, primals_82, primals_83, primals_85, primals_86, primals_88, primals_89, primals_91, primals_92, primals_94, primals_95, primals_97, primals_98, primals_100, primals_101, buf0, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, buf26, buf27, buf29, buf30, buf32, buf33, buf35, buf36, buf38, buf39, buf41, buf42, buf44, buf45, buf47, buf48, buf50, buf52, buf53, buf55, buf56, buf58, buf59, buf61, buf62, buf64, buf65, buf67, buf68, buf70, buf71, buf73, buf74, buf76, buf77, buf79, buf80, buf82, buf83, buf85, buf86, buf88, buf89, buf91, buf92, buf94, buf95, buf97, buf98, )
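# Benchmark harness: builds random CUDA tensors with the exact shapes and
# strides that call() asserts on its inputs, then times the compiled graph
# with print_performance.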
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((96, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((256, 288, 1, 1), (288, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((128, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((128, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((128, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((128, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((512, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((4, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
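# concatLayer: three sequential Conv2d(3x3, padding=1) + PReLU sub-blocks;
# forward chains them and concatenates the three outputs channel-wise,
# tripling the channel count.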
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super(concatLayer, self).__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
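# stage: five chained concatLayers followed by a 1x1 conv + PReLU
# (Mconv6/Mprelu6) and a final 1x1 projection conv (Mconv7).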
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super(stage, self).__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
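# L1Part: two refinement stages. Stage 0 consumes cat([features, L2Out]);
# stage 1 re-consumes the original inputs together with stage 0's output.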
class L1Part(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super(L1Part, self).__init__()
self.firstStage = stage(0, in_channels, 96, 256, stage_out_channels,
'L1')
self.secondStage = stage(1, in_channels + stage_out_channels, 128,
512, stage_out_channels, 'L1')
def forward(self, features, L2Out):
x = torch.cat([features, L2Out], 1)
x = self.firstStage(x)
x = torch.cat([features, x, L2Out], 1)
out = self.secondStage(x)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'stage_out_channels': 4}]
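if __name__ == '__main__':
    # Minimal eager-mode smoke test (a sketch, not part of the original
    # module): instantiate L1Part with the hyperparameters from
    # get_init_inputs() and run one forward pass on the random inputs from
    # get_inputs().
    model = L1Part(in_channels=4, stage_out_channels=4)
    features, l2_out = get_inputs()
    out = model(features, l2_out)
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])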
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
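# Kernel 0: torch.cat([features, L2Out], dim=1) into a (4, 4, 4, 4) buffer.
# Channel 0 comes from the 1-channel input (in_ptr0), channels 1..3 from the
# 3-channel input (in_ptr1). The bare tl.full(...) statements here and in the
# other cat kernels are dead bound constants left behind by the generator.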
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
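# Kernel 1: fused conv-bias + PReLU over 96 channels. The per-channel bias
# (in_ptr0) is added in place into the conv output, and PReLU(x) = x if x > 0
# else weight * x (weights in in_ptr1) is written to out_ptr0.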
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp7, None)
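# Kernel 2: in-place per-channel bias add after a 96-channel convolution.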
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
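# Kernel 3: torch.cat of three 96-channel branches into 288 channels; the
# third branch applies its PReLU (weights in in_ptr3) on the fly while copying.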
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 288
x0 = xindex % 16
x2 = xindex // 4608
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1536 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 192, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-96 + x1) + 1536 * x2), tmp9,
other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 288, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-192 + x1) + 1536 * x2), tmp11,
other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + (-192 + x1), tmp11, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x3, tmp23, None)
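# Kernel 4: fused conv-bias + PReLU, same pattern as kernel 1 but for 256
# channels.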
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_4(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp7, None)
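# Kernel 5: builds the second-stage input torch.cat([features, x, L2Out], 1)
# (1 + 4 + 3 = 8 channels); the middle branch adds the stage-0 output-conv
# bias (in_ptr2) on the fly.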
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-1 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp9, tmp12, tmp13)
tmp15 = tmp0 >= tmp7
tl.full([1], 8, tl.int64)
tmp18 = tl.load(in_ptr3 + (x0 + 16 * (-5 + x1) + 48 * x2), tmp15 &
xmask, other=0.0)
tmp19 = tl.where(tmp9, tmp14, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
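# Kernel 6: fused conv-bias + PReLU over 128 channels.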
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp7, None)
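# Kernel 7: in-place per-channel bias add after a 128-channel convolution.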
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
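# Kernel 8: torch.cat of three 128-channel branches into 384 channels, with
# the third branch's PReLU (weights in in_ptr3) applied on the fly.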
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 384
x0 = xindex % 16
x2 = xindex // 6144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 2048 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 256, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-128 + x1) + 2048 * x2), tmp9,
other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 384, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-256 + x1) + 2048 * x2), tmp11,
other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + (-256 + x1), tmp11, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x3, tmp23, None)
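# Kernel 9: fused conv-bias + PReLU over 512 channels.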
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp7, None)
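# Kernel 10: masked in-place bias add for the final 4-channel output
# convolution (256 elements total).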
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
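# call(): the compiled forward pass. It validates every parameter's
# shape/stride, then interleaves extern ATen convolutions with the fused
# Triton kernels above; buf100 is the network output and the remaining
# returns are tensors retained for the backward pass.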
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (96, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (96,), (1,))
assert_size_stride(primals_5, (96,), (1,))
assert_size_stride(primals_6, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_7, (96,), (1,))
assert_size_stride(primals_8, (96,), (1,))
assert_size_stride(primals_9, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_10, (96,), (1,))
assert_size_stride(primals_11, (96,), (1,))
assert_size_stride(primals_12, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_13, (96,), (1,))
assert_size_stride(primals_14, (96,), (1,))
assert_size_stride(primals_15, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_16, (96,), (1,))
assert_size_stride(primals_17, (96,), (1,))
assert_size_stride(primals_18, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_19, (96,), (1,))
assert_size_stride(primals_20, (96,), (1,))
assert_size_stride(primals_21, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_22, (96,), (1,))
assert_size_stride(primals_23, (96,), (1,))
assert_size_stride(primals_24, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_25, (96,), (1,))
assert_size_stride(primals_26, (96,), (1,))
assert_size_stride(primals_27, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_28, (96,), (1,))
assert_size_stride(primals_29, (96,), (1,))
assert_size_stride(primals_30, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_31, (96,), (1,))
assert_size_stride(primals_32, (96,), (1,))
assert_size_stride(primals_33, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_34, (96,), (1,))
assert_size_stride(primals_35, (96,), (1,))
assert_size_stride(primals_36, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_37, (96,), (1,))
assert_size_stride(primals_38, (96,), (1,))
assert_size_stride(primals_39, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_40, (96,), (1,))
assert_size_stride(primals_41, (96,), (1,))
assert_size_stride(primals_42, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_43, (96,), (1,))
assert_size_stride(primals_44, (96,), (1,))
assert_size_stride(primals_45, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_46, (96,), (1,))
assert_size_stride(primals_47, (96,), (1,))
assert_size_stride(primals_48, (256, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_49, (256,), (1,))
assert_size_stride(primals_50, (256,), (1,))
assert_size_stride(primals_51, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_52, (4,), (1,))
assert_size_stride(primals_53, (128, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_54, (128,), (1,))
assert_size_stride(primals_55, (128,), (1,))
assert_size_stride(primals_56, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_57, (128,), (1,))
assert_size_stride(primals_58, (128,), (1,))
assert_size_stride(primals_59, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_60, (128,), (1,))
assert_size_stride(primals_61, (128,), (1,))
assert_size_stride(primals_62, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_63, (128,), (1,))
assert_size_stride(primals_64, (128,), (1,))
assert_size_stride(primals_65, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_66, (128,), (1,))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_69, (128,), (1,))
assert_size_stride(primals_70, (128,), (1,))
assert_size_stride(primals_71, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_72, (128,), (1,))
assert_size_stride(primals_73, (128,), (1,))
assert_size_stride(primals_74, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (128,), (1,))
assert_size_stride(primals_77, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_78, (128,), (1,))
assert_size_stride(primals_79, (128,), (1,))
assert_size_stride(primals_80, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_81, (128,), (1,))
assert_size_stride(primals_82, (128,), (1,))
assert_size_stride(primals_83, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_84, (128,), (1,))
assert_size_stride(primals_85, (128,), (1,))
assert_size_stride(primals_86, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_87, (128,), (1,))
assert_size_stride(primals_88, (128,), (1,))
assert_size_stride(primals_89, (128, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_90, (128,), (1,))
assert_size_stride(primals_91, (128,), (1,))
assert_size_stride(primals_92, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_93, (128,), (1,))
assert_size_stride(primals_94, (128,), (1,))
assert_size_stride(primals_95, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_96, (128,), (1,))
assert_size_stride(primals_97, (128,), (1,))
assert_size_stride(primals_98, (512, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_99, (512,), (1,))
assert_size_stride(primals_100, (512,), (1,))
assert_size_stride(primals_101, (4, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_102, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 96, 4, 4), (1536, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf2,
primals_4, primals_5, buf3, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_4
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 96, 4, 4), (1536, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf5,
primals_7, primals_8, buf6, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 96, 4, 4), (1536, 16, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(6144)](buf8, primals_10, 6144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(18432)](buf3, buf6, buf8, primals_11,
buf9, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 4, 4), (1536, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf11,
primals_13, primals_14, buf12, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_13
buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 96, 4, 4), (1536, 16, 4, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf14,
primals_16, primals_17, buf15, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_16
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 96, 4, 4), (1536, 16, 4, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_2[grid(6144)](buf17, primals_19, 6144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf18 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(18432)](buf12, buf15, buf17, primals_20,
buf18, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 96, 4, 4), (1536, 16, 4, 1))
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf20,
primals_22, primals_23, buf21, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_22
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 96, 4, 4), (1536, 16, 4, 1))
buf23 = buf22
del buf22
buf24 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf23,
primals_25, primals_26, buf24, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_25
buf25 = extern_kernels.convolution(buf24, primals_27, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 96, 4, 4), (1536, 16, 4, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_2[grid(6144)](buf26, primals_28, 6144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_28
buf27 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(18432)](buf21, buf24, buf26, primals_29,
buf27, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 96, 4, 4), (1536, 16, 4, 1))
buf29 = buf28
del buf28
buf30 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf29,
primals_31, primals_32, buf30, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_31
buf31 = extern_kernels.convolution(buf30, primals_33, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 96, 4, 4), (1536, 16, 4, 1))
buf32 = buf31
del buf31
buf33 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf32,
primals_34, primals_35, buf33, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_34
buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 96, 4, 4), (1536, 16, 4, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_2[grid(6144)](buf35, primals_37, 6144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_37
buf36 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(18432)](buf30, buf33, buf35, primals_38,
buf36, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf37 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 96, 4, 4), (1536, 16, 4, 1))
buf38 = buf37
del buf37
buf39 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf38,
primals_40, primals_41, buf39, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_40
buf40 = extern_kernels.convolution(buf39, primals_42, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 96, 4, 4), (1536, 16, 4, 1))
buf41 = buf40
del buf40
buf42 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(6144)](buf41,
primals_43, primals_44, buf42, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_43
buf43 = extern_kernels.convolution(buf42, primals_45, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 96, 4, 4), (1536, 16, 4, 1))
buf44 = buf43
del buf43
triton_poi_fused_convolution_2[grid(6144)](buf44, primals_46, 6144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_46
buf45 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(18432)](buf39, buf42, buf44, primals_47,
buf45, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf46 = extern_kernels.convolution(buf45, primals_48, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 256, 4, 4), (4096, 16, 4, 1))
buf47 = buf46
del buf46
buf48 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_4[grid(16384)](buf47,
primals_49, primals_50, buf48, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_49
buf49 = extern_kernels.convolution(buf48, primals_51, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 4, 4, 4), (64, 16, 4, 1))
        buf50 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_5[grid(512)](primals_1, buf49, primals_52,
primals_2, buf50, 512, XBLOCK=128, num_warps=4, num_stages=1)
del buf49
del primals_1
del primals_2
del primals_52
buf51 = extern_kernels.convolution(buf50, primals_53, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 4, 4), (2048, 16, 4, 1))
buf52 = buf51
del buf51
buf53 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf52,
primals_54, primals_55, buf53, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_54
buf54 = extern_kernels.convolution(buf53, primals_56, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 128, 4, 4), (2048, 16, 4, 1))
buf55 = buf54
del buf54
buf56 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf55,
primals_57, primals_58, buf56, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_57
buf57 = extern_kernels.convolution(buf56, primals_59, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 4, 4), (2048, 16, 4, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_7[grid(8192)](buf58, primals_60, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_60
buf59 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(24576)](buf53, buf56, buf58, primals_61,
buf59, 24576, XBLOCK=256, num_warps=4, num_stages=1)
buf60 = extern_kernels.convolution(buf59, primals_62, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 128, 4, 4), (2048, 16, 4, 1))
buf61 = buf60
del buf60
buf62 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf61,
primals_63, primals_64, buf62, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_63
buf63 = extern_kernels.convolution(buf62, primals_65, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 128, 4, 4), (2048, 16, 4, 1))
buf64 = buf63
del buf63
buf65 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf64,
primals_66, primals_67, buf65, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_66
buf66 = extern_kernels.convolution(buf65, primals_68, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 4, 4), (2048, 16, 4, 1))
buf67 = buf66
del buf66
triton_poi_fused_convolution_7[grid(8192)](buf67, primals_69, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_69
buf68 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(24576)](buf62, buf65, buf67, primals_70,
buf68, 24576, XBLOCK=256, num_warps=4, num_stages=1)
buf69 = extern_kernels.convolution(buf68, primals_71, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 128, 4, 4), (2048, 16, 4, 1))
buf70 = buf69
del buf69
buf71 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf70,
primals_72, primals_73, buf71, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_72
buf72 = extern_kernels.convolution(buf71, primals_74, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 4, 4), (2048, 16, 4, 1))
buf73 = buf72
del buf72
buf74 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf73,
primals_75, primals_76, buf74, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_75
buf75 = extern_kernels.convolution(buf74, primals_77, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf75, (4, 128, 4, 4), (2048, 16, 4, 1))
buf76 = buf75
del buf75
triton_poi_fused_convolution_7[grid(8192)](buf76, primals_78, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_78
buf77 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(24576)](buf71, buf74, buf76, primals_79,
buf77, 24576, XBLOCK=256, num_warps=4, num_stages=1)
buf78 = extern_kernels.convolution(buf77, primals_80, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 4, 4), (2048, 16, 4, 1))
buf79 = buf78
del buf78
buf80 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf79,
primals_81, primals_82, buf80, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_81
buf81 = extern_kernels.convolution(buf80, primals_83, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf81, (4, 128, 4, 4), (2048, 16, 4, 1))
buf82 = buf81
del buf81
buf83 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf82,
primals_84, primals_85, buf83, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_84
buf84 = extern_kernels.convolution(buf83, primals_86, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 4, 4), (2048, 16, 4, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_7[grid(8192)](buf85, primals_87, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_87
buf86 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(24576)](buf80, buf83, buf85, primals_88,
buf86, 24576, XBLOCK=256, num_warps=4, num_stages=1)
buf87 = extern_kernels.convolution(buf86, primals_89, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 128, 4, 4), (2048, 16, 4, 1))
buf88 = buf87
del buf87
buf89 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf88,
primals_90, primals_91, buf89, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_90
buf90 = extern_kernels.convolution(buf89, primals_92, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf90, (4, 128, 4, 4), (2048, 16, 4, 1))
buf91 = buf90
del buf90
buf92 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_6[grid(8192)](buf91,
primals_93, primals_94, buf92, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_93
buf93 = extern_kernels.convolution(buf92, primals_95, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 4, 4), (2048, 16, 4, 1))
buf94 = buf93
del buf93
triton_poi_fused_convolution_7[grid(8192)](buf94, primals_96, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_96
buf95 = empty_strided_cuda((4, 384, 4, 4), (6144, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_8[grid(24576)](buf89, buf92, buf94, primals_97,
buf95, 24576, XBLOCK=256, num_warps=4, num_stages=1)
buf96 = extern_kernels.convolution(buf95, primals_98, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 512, 4, 4), (8192, 16, 4, 1))
buf97 = buf96
del buf96
buf98 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_9[grid(32768)](buf97,
primals_99, primals_100, buf98, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_99
        buf99 = extern_kernels.convolution(buf98, primals_101, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 4, 4, 4), (64, 16, 4, 1))
buf100 = buf99
del buf99
triton_poi_fused_convolution_10[grid(256)](buf100, primals_102, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_102
return (buf100, primals_3, primals_5, primals_6, primals_8, primals_9,
primals_11, primals_12, primals_14, primals_15, primals_17,
primals_18, primals_20, primals_21, primals_23, primals_24,
primals_26, primals_27, primals_29, primals_30, primals_32,
primals_33, primals_35, primals_36, primals_38, primals_39,
primals_41, primals_42, primals_44, primals_45, primals_47,
primals_48, primals_50, primals_51, primals_53, primals_55,
primals_56, primals_58, primals_59, primals_61, primals_62,
primals_64, primals_65, primals_67, primals_68, primals_70,
primals_71, primals_73, primals_74, primals_76, primals_77,
primals_79, primals_80, primals_82, primals_83, primals_85,
primals_86, primals_88, primals_89, primals_91, primals_92,
primals_94, primals_95, primals_97, primals_98, primals_100,
primals_101, buf0, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12,
buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, buf26,
buf27, buf29, buf30, buf32, buf33, buf35, buf36, buf38, buf39,
buf41, buf42, buf44, buf45, buf47, buf48, buf50, buf52, buf53,
buf55, buf56, buf58, buf59, buf61, buf62, buf64, buf65, buf67,
buf68, buf70, buf71, buf73, buf74, buf76, buf77, buf79, buf80,
buf82, buf83, buf85, buf86, buf88, buf89, buf91, buf92, buf94,
buf95, buf97, buf98)
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super(concatLayer, self).__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super(stage, self).__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
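# L1PartNew: mirrors L1Part, but forward() gathers every conv weight/bias and
# PReLU weight into the primals list and delegates to the compiled call().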
class L1PartNew(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super(L1PartNew, self).__init__()
self.firstStage = stage(0, in_channels, 96, 256, stage_out_channels,
'L1')
self.secondStage = stage(1, in_channels + stage_out_channels, 128,
512, stage_out_channels, 'L1')
def forward(self, input_0, input_1):
primals_3 = (self.firstStage.firstConcat.firstSub.
Mconv1_stage0_L1_0.weight)
primals_4 = (self.firstStage.firstConcat.firstSub.
Mconv1_stage0_L1_0.bias)
primals_5 = (self.firstStage.firstConcat.firstSub.
Mprelu1_stage0_L1_0.weight)
primals_6 = (self.firstStage.firstConcat.secondSub.
Mconv1_stage0_L1_1.weight)
primals_7 = (self.firstStage.firstConcat.secondSub.
Mconv1_stage0_L1_1.bias)
primals_8 = (self.firstStage.firstConcat.secondSub.
Mprelu1_stage0_L1_1.weight)
primals_9 = (self.firstStage.firstConcat.thirdSub.
Mconv1_stage0_L1_2.weight)
primals_10 = (self.firstStage.firstConcat.thirdSub.
Mconv1_stage0_L1_2.bias)
primals_11 = (self.firstStage.firstConcat.thirdSub.
Mprelu1_stage0_L1_2.weight)
primals_12 = (self.firstStage.secondConcat.firstSub.
Mconv2_stage0_L1_0.weight)
primals_13 = (self.firstStage.secondConcat.firstSub.
Mconv2_stage0_L1_0.bias)
primals_14 = (self.firstStage.secondConcat.firstSub.
Mprelu2_stage0_L1_0.weight)
primals_15 = (self.firstStage.secondConcat.secondSub.
Mconv2_stage0_L1_1.weight)
primals_16 = (self.firstStage.secondConcat.secondSub.
Mconv2_stage0_L1_1.bias)
primals_17 = (self.firstStage.secondConcat.secondSub.
Mprelu2_stage0_L1_1.weight)
primals_18 = (self.firstStage.secondConcat.thirdSub.
Mconv2_stage0_L1_2.weight)
primals_19 = (self.firstStage.secondConcat.thirdSub.
Mconv2_stage0_L1_2.bias)
primals_20 = (self.firstStage.secondConcat.thirdSub.
Mprelu2_stage0_L1_2.weight)
primals_21 = (self.firstStage.thirdConcat.firstSub.
Mconv3_stage0_L1_0.weight)
primals_22 = (self.firstStage.thirdConcat.firstSub.
Mconv3_stage0_L1_0.bias)
primals_23 = (self.firstStage.thirdConcat.firstSub.
Mprelu3_stage0_L1_0.weight)
primals_24 = (self.firstStage.thirdConcat.secondSub.
Mconv3_stage0_L1_1.weight)
primals_25 = (self.firstStage.thirdConcat.secondSub.
Mconv3_stage0_L1_1.bias)
primals_26 = (self.firstStage.thirdConcat.secondSub.
Mprelu3_stage0_L1_1.weight)
primals_27 = (self.firstStage.thirdConcat.thirdSub.
Mconv3_stage0_L1_2.weight)
primals_28 = (self.firstStage.thirdConcat.thirdSub.
Mconv3_stage0_L1_2.bias)
primals_29 = (self.firstStage.thirdConcat.thirdSub.
Mprelu3_stage0_L1_2.weight)
primals_30 = (self.firstStage.fourthConcat.firstSub.
Mconv4_stage0_L1_0.weight)
primals_31 = (self.firstStage.fourthConcat.firstSub.
Mconv4_stage0_L1_0.bias)
primals_32 = (self.firstStage.fourthConcat.firstSub.
Mprelu4_stage0_L1_0.weight)
primals_33 = (self.firstStage.fourthConcat.secondSub.
Mconv4_stage0_L1_1.weight)
primals_34 = (self.firstStage.fourthConcat.secondSub.
Mconv4_stage0_L1_1.bias)
primals_35 = (self.firstStage.fourthConcat.secondSub.
Mprelu4_stage0_L1_1.weight)
primals_36 = (self.firstStage.fourthConcat.thirdSub.
Mconv4_stage0_L1_2.weight)
primals_37 = (self.firstStage.fourthConcat.thirdSub.
Mconv4_stage0_L1_2.bias)
primals_38 = (self.firstStage.fourthConcat.thirdSub.
Mprelu4_stage0_L1_2.weight)
primals_39 = (self.firstStage.fifthConcat.firstSub.
Mconv5_stage0_L1_0.weight)
primals_40 = (self.firstStage.fifthConcat.firstSub.
Mconv5_stage0_L1_0.bias)
primals_41 = (self.firstStage.fifthConcat.firstSub.
Mprelu5_stage0_L1_0.weight)
primals_42 = (self.firstStage.fifthConcat.secondSub.
Mconv5_stage0_L1_1.weight)
primals_43 = (self.firstStage.fifthConcat.secondSub.
Mconv5_stage0_L1_1.bias)
primals_44 = (self.firstStage.fifthConcat.secondSub.
Mprelu5_stage0_L1_1.weight)
primals_45 = (self.firstStage.fifthConcat.thirdSub.
Mconv5_stage0_L1_2.weight)
primals_46 = (self.firstStage.fifthConcat.thirdSub.
Mconv5_stage0_L1_2.bias)
primals_47 = (self.firstStage.fifthConcat.thirdSub.
Mprelu5_stage0_L1_2.weight)
primals_48 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L1.weight
primals_49 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L1.bias
primals_50 = self.firstStage.afterConcatsFirst.Mprelu6_stage0_L1.weight
primals_51 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L1.weight
primals_52 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L1.bias
primals_53 = (self.secondStage.firstConcat.firstSub.
Mconv1_stage1_L1_0.weight)
primals_54 = (self.secondStage.firstConcat.firstSub.
Mconv1_stage1_L1_0.bias)
primals_55 = (self.secondStage.firstConcat.firstSub.
Mprelu1_stage1_L1_0.weight)
primals_56 = (self.secondStage.firstConcat.secondSub.
Mconv1_stage1_L1_1.weight)
primals_57 = (self.secondStage.firstConcat.secondSub.
Mconv1_stage1_L1_1.bias)
primals_58 = (self.secondStage.firstConcat.secondSub.
Mprelu1_stage1_L1_1.weight)
primals_59 = (self.secondStage.firstConcat.thirdSub.
Mconv1_stage1_L1_2.weight)
primals_60 = (self.secondStage.firstConcat.thirdSub.
Mconv1_stage1_L1_2.bias)
primals_61 = (self.secondStage.firstConcat.thirdSub.
Mprelu1_stage1_L1_2.weight)
primals_62 = (self.secondStage.secondConcat.firstSub.
Mconv2_stage1_L1_0.weight)
primals_63 = (self.secondStage.secondConcat.firstSub.
Mconv2_stage1_L1_0.bias)
primals_64 = (self.secondStage.secondConcat.firstSub.
Mprelu2_stage1_L1_0.weight)
primals_65 = (self.secondStage.secondConcat.secondSub.
Mconv2_stage1_L1_1.weight)
primals_66 = (self.secondStage.secondConcat.secondSub.
Mconv2_stage1_L1_1.bias)
primals_67 = (self.secondStage.secondConcat.secondSub.
Mprelu2_stage1_L1_1.weight)
primals_68 = (self.secondStage.secondConcat.thirdSub.
Mconv2_stage1_L1_2.weight)
primals_69 = (self.secondStage.secondConcat.thirdSub.
Mconv2_stage1_L1_2.bias)
primals_70 = (self.secondStage.secondConcat.thirdSub.
Mprelu2_stage1_L1_2.weight)
primals_71 = (self.secondStage.thirdConcat.firstSub.
Mconv3_stage1_L1_0.weight)
primals_72 = (self.secondStage.thirdConcat.firstSub.
Mconv3_stage1_L1_0.bias)
primals_73 = (self.secondStage.thirdConcat.firstSub.
Mprelu3_stage1_L1_0.weight)
primals_74 = (self.secondStage.thirdConcat.secondSub.
Mconv3_stage1_L1_1.weight)
primals_75 = (self.secondStage.thirdConcat.secondSub.
Mconv3_stage1_L1_1.bias)
primals_76 = (self.secondStage.thirdConcat.secondSub.
Mprelu3_stage1_L1_1.weight)
primals_77 = (self.secondStage.thirdConcat.thirdSub.
Mconv3_stage1_L1_2.weight)
primals_78 = (self.secondStage.thirdConcat.thirdSub.
Mconv3_stage1_L1_2.bias)
primals_79 = (self.secondStage.thirdConcat.thirdSub.
Mprelu3_stage1_L1_2.weight)
primals_80 = (self.secondStage.fourthConcat.firstSub.
Mconv4_stage1_L1_0.weight)
primals_81 = (self.secondStage.fourthConcat.firstSub.
Mconv4_stage1_L1_0.bias)
primals_82 = (self.secondStage.fourthConcat.firstSub.
Mprelu4_stage1_L1_0.weight)
primals_83 = (self.secondStage.fourthConcat.secondSub.
Mconv4_stage1_L1_1.weight)
primals_84 = (self.secondStage.fourthConcat.secondSub.
Mconv4_stage1_L1_1.bias)
primals_85 = (self.secondStage.fourthConcat.secondSub.
Mprelu4_stage1_L1_1.weight)
primals_86 = (self.secondStage.fourthConcat.thirdSub.
Mconv4_stage1_L1_2.weight)
primals_87 = (self.secondStage.fourthConcat.thirdSub.
Mconv4_stage1_L1_2.bias)
primals_88 = (self.secondStage.fourthConcat.thirdSub.
Mprelu4_stage1_L1_2.weight)
primals_89 = (self.secondStage.fifthConcat.firstSub.
Mconv5_stage1_L1_0.weight)
primals_90 = (self.secondStage.fifthConcat.firstSub.
Mconv5_stage1_L1_0.bias)
primals_91 = (self.secondStage.fifthConcat.firstSub.
Mprelu5_stage1_L1_0.weight)
primals_92 = (self.secondStage.fifthConcat.secondSub.
Mconv5_stage1_L1_1.weight)
primals_93 = (self.secondStage.fifthConcat.secondSub.
Mconv5_stage1_L1_1.bias)
primals_94 = (self.secondStage.fifthConcat.secondSub.
Mprelu5_stage1_L1_1.weight)
primals_95 = (self.secondStage.fifthConcat.thirdSub.
Mconv5_stage1_L1_2.weight)
primals_96 = (self.secondStage.fifthConcat.thirdSub.
Mconv5_stage1_L1_2.bias)
primals_97 = (self.secondStage.fifthConcat.thirdSub.
Mprelu5_stage1_L1_2.weight)
primals_98 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L1.weight
primals_99 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L1.bias
primals_100 = (self.secondStage.afterConcatsFirst.Mprelu6_stage1_L1
.weight)
primals_101 = (self.secondStage.afterConcatsSecond.Mconv7_stage1_L1
.weight)
primals_102 = self.secondStage.afterConcatsSecond.Mconv7_stage1_L1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102])
return output[0]
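# Hedged smoke-test sketch for the compiled wrapper above; this is not part
# of the generated module. It assumes a CUDA device and reuses the shapes
# from the reference helpers further down: inputs (4, 1, 4, 4) and
# (4, 3, 4, 4), with in_channels=4 and stage_out_channels=4.
def _smoke_test_l1part():
    model = L1PartNew(4, 4).cuda()
    features = torch.rand(4, 1, 4, 4, device='cuda')
    l2_out = torch.rand(4, 3, 4, 4, device='cuda')
    out = model(features, l2_out)
    assert out.shape == (4, 4, 4, 4)  # Mconv7 maps back to stage_out_channels
    return out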
| EddieMG/LateTemporalModeling3DCNN | L1Part | false | 2,362 | ["MIT"] | 0 | 94c87dc1d31d09bc310d0e735a2e55453976cb0d | https://github.com/EddieMG/LateTemporalModeling3DCNN/tree/94c87dc1d31d09bc310d0e735a2e55453976cb0d | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super().__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super().__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0
)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
class Model(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super().__init__()
self.firstStage = stage(0, in_channels, 96, 256, stage_out_channels,
'L1')
self.secondStage = stage(1, in_channels + stage_out_channels, 128,
512, stage_out_channels, 'L1')
def forward(self, features, L2Out):
x = torch.cat([features, L2Out], 1)
x = self.firstStage(x)
x = torch.cat([features, x, L2Out], 1)
out = self.secondStage(x)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [4, 4]
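# Hedged usage sketch for the reference module above (CPU is enough here;
# shapes come straight from the two helper functions):
def _demo_forward():
    features, l2_out = get_inputs()
    in_channels, stage_out_channels = get_init_inputs()
    model = Model(in_channels, stage_out_channels)
    out = model(features, l2_out)
    assert out.shape == (4, stage_out_channels, 4, 4)
    return out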
|
L2Part | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4s/c4shpza3w2x6smnond437qr5xjawce63dxxrkoendepulf3powr2.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
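# Plain-PyTorch reading of the fused kernel above, kept here as a reference
# sketch only (the real kernel adds the bias in place into the conv buffer
# and writes the PReLU result to a second buffer):
def _conv_bias_prelu_reference(conv_out, bias, prelu_weight):
    y = conv_out + bias.view(1, -1, 1, 1)  # tmp2 = tmp0 + tmp1
    act = torch.where(y > 0, y, prelu_weight.view(1, -1, 1, 1) * y)  # tmp7
    return y, act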
# kernel path: runs/run_shard_7/inductor_cache/6u/c6uipwl3edw55eg25a2tdrei2ze57knqjuw4iammxf6xwqiwxef7.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_5 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
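# Reference for the bias-only kernel above. The PReLU of this third branch
# is deliberately absent here: it is deferred and fused into the
# concatenation kernel that follows.
def _bias_add_reference(conv_out, bias):
    return conv_out + bias.view(1, -1, 1, 1)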
# kernel path: runs/run_shard_7/inductor_cache/qj/cqjgnkoee4mry62wexljuvrqjqh5nmfvfgx4olkf2lkyeroqeapg.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 288
x0 = xindex % 16
x2 = (xindex // 4608)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (1536*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 192, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-96) + x1)) + (1536*x2)), tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 288, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (16*((-192) + x1)) + (1536*x2)), tmp11, other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + ((-192) + x1), tmp11, eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + (x3), tmp23, None)
''', device_str='cuda')
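# Plain-PyTorch reading of the fused concatenation above: the first two
# branches arrive as finished PReLU outputs, while the third arrives as a
# bias-added conv output and receives its PReLU inside the cat itself.
def _cat_prelu_reference(branch0, branch1, branch2, prelu_weight):
    third = torch.where(branch2 > 0, branch2,
                        prelu_weight.view(1, -1, 1, 1) * branch2)
    return torch.cat([branch0, branch1, third], dim=1)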
# kernel path: runs/run_shard_7/inductor_cache/dz/cdzdzjbmnoov6dzfvnkby3cyo2iugwp6b5vxmnrlf5qvsdsoqruo.py
# Topologically Sorted Source Nodes: [input_31, input_32], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_31 => convolution_15
# input_32 => gt_15, mul_15, where_15
# Graph fragment:
# %convolution_15 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %primals_47, %primals_48, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_15 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %convolution_15), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_15), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_3 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hr/chry3xppwp5bniw3d3lahl3d3vqmyzphj3u55wsnqylmzluqi555.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat_5
# Graph fragment:
# %cat_5 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %convolution_16], 1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
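# The kernel above builds the skip connection that feeds the next stage:
# cat([inputs, mconv7_out + bias], dim=1), with the bias addition for the
# Mconv7 output fused into the concatenation. Reference sketch:
def _cat_skip_reference(inputs, conv_out, bias):
    return torch.cat([inputs, conv_out + bias.view(1, -1, 1, 1)], dim=1)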
# kernel path: runs/run_shard_7/inductor_cache/ie/ciefsncpfrhpitcubu6g5kncfiv2ggi5tll7qllaeu2wbfivltuw.py
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_34 => convolution_17
# input_35 => gt_16, mul_16, where_16
# Graph fragment:
# %convolution_17 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_5, %primals_52, %primals_53, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_17, 0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, %convolution_17), kwargs = {})
# %where_16 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_16, %convolution_17, %mul_16), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_5 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kg/ckgzqia26vn3v7x3apvntzstonjsgd3pomd6nly5yomjycxxg5jc.py
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_38 => convolution_19
# Graph fragment:
# %convolution_19 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_17, %primals_58, %primals_59, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bp/cbpfpictss7xdvt3z7xgqt2rjd7wyrwjm3suz23gs42mjniqhijs.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_5 => cat_6
# Graph fragment:
# %cat_6 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_17, %where_18], 1), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp11 & xmask, other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + ((-8) + x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + (x3), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/l4/cl4ns2lerodjhdybkbaug54sjgpvfoad4ffsvmsbcqpkqaenpbl2.py
# Topologically Sorted Source Nodes: [input_64, input_65], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# input_64 => convolution_32
# input_65 => gt_31, mul_31, where_31
# Graph fragment:
# %convolution_32 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_10, %primals_97, %primals_98, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_31 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_32, 0), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %convolution_32), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_31, %convolution_32, %mul_31), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_8 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_8(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
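# Indexing note shared by the elementwise kernels above: for a contiguous
# NCHW tensor with H * W == 16 (the 4x4 feature maps used here), the
# per-channel parameter index is recovered from the flat offset exactly as
# the kernels' `x1 = (xindex // 16) % C` line does. Tiny sketch:
def _channel_of_flat_index(i, num_channels, hw=16):
    return (i // hw) % num_channels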
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201 = args
args.clear()
assert_size_stride(primals_1, (96, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (96, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (96, ), (1, ))
assert_size_stride(primals_5, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_6, (96, ), (1, ))
assert_size_stride(primals_7, (96, ), (1, ))
assert_size_stride(primals_8, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_9, (96, ), (1, ))
assert_size_stride(primals_10, (96, ), (1, ))
assert_size_stride(primals_11, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_12, (96, ), (1, ))
assert_size_stride(primals_13, (96, ), (1, ))
assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_15, (96, ), (1, ))
assert_size_stride(primals_16, (96, ), (1, ))
assert_size_stride(primals_17, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_18, (96, ), (1, ))
assert_size_stride(primals_19, (96, ), (1, ))
assert_size_stride(primals_20, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_21, (96, ), (1, ))
assert_size_stride(primals_22, (96, ), (1, ))
assert_size_stride(primals_23, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_24, (96, ), (1, ))
assert_size_stride(primals_25, (96, ), (1, ))
assert_size_stride(primals_26, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_27, (96, ), (1, ))
assert_size_stride(primals_28, (96, ), (1, ))
assert_size_stride(primals_29, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_30, (96, ), (1, ))
assert_size_stride(primals_31, (96, ), (1, ))
assert_size_stride(primals_32, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_33, (96, ), (1, ))
assert_size_stride(primals_34, (96, ), (1, ))
assert_size_stride(primals_35, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_36, (96, ), (1, ))
assert_size_stride(primals_37, (96, ), (1, ))
assert_size_stride(primals_38, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_39, (96, ), (1, ))
assert_size_stride(primals_40, (96, ), (1, ))
assert_size_stride(primals_41, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_42, (96, ), (1, ))
assert_size_stride(primals_43, (96, ), (1, ))
assert_size_stride(primals_44, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_45, (96, ), (1, ))
assert_size_stride(primals_46, (96, ), (1, ))
assert_size_stride(primals_47, (8, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_48, (8, ), (1, ))
assert_size_stride(primals_49, (8, ), (1, ))
assert_size_stride(primals_50, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_51, (4, ), (1, ))
assert_size_stride(primals_52, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_53, (4, ), (1, ))
assert_size_stride(primals_54, (4, ), (1, ))
assert_size_stride(primals_55, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_56, (4, ), (1, ))
assert_size_stride(primals_57, (4, ), (1, ))
assert_size_stride(primals_58, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_59, (4, ), (1, ))
assert_size_stride(primals_60, (4, ), (1, ))
assert_size_stride(primals_61, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_62, (4, ), (1, ))
assert_size_stride(primals_63, (4, ), (1, ))
assert_size_stride(primals_64, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_65, (4, ), (1, ))
assert_size_stride(primals_66, (4, ), (1, ))
assert_size_stride(primals_67, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_68, (4, ), (1, ))
assert_size_stride(primals_69, (4, ), (1, ))
assert_size_stride(primals_70, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_71, (4, ), (1, ))
assert_size_stride(primals_72, (4, ), (1, ))
assert_size_stride(primals_73, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_74, (4, ), (1, ))
assert_size_stride(primals_75, (4, ), (1, ))
assert_size_stride(primals_76, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_77, (4, ), (1, ))
assert_size_stride(primals_78, (4, ), (1, ))
assert_size_stride(primals_79, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_80, (4, ), (1, ))
assert_size_stride(primals_81, (4, ), (1, ))
assert_size_stride(primals_82, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_83, (4, ), (1, ))
assert_size_stride(primals_84, (4, ), (1, ))
assert_size_stride(primals_85, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_86, (4, ), (1, ))
assert_size_stride(primals_87, (4, ), (1, ))
assert_size_stride(primals_88, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_89, (4, ), (1, ))
assert_size_stride(primals_90, (4, ), (1, ))
assert_size_stride(primals_91, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_92, (4, ), (1, ))
assert_size_stride(primals_93, (4, ), (1, ))
assert_size_stride(primals_94, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_95, (4, ), (1, ))
assert_size_stride(primals_96, (4, ), (1, ))
assert_size_stride(primals_97, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_98, (16, ), (1, ))
assert_size_stride(primals_99, (16, ), (1, ))
assert_size_stride(primals_100, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_101, (4, ), (1, ))
assert_size_stride(primals_102, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_103, (4, ), (1, ))
assert_size_stride(primals_104, (4, ), (1, ))
assert_size_stride(primals_105, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_106, (4, ), (1, ))
assert_size_stride(primals_107, (4, ), (1, ))
assert_size_stride(primals_108, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_109, (4, ), (1, ))
assert_size_stride(primals_110, (4, ), (1, ))
assert_size_stride(primals_111, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_112, (4, ), (1, ))
assert_size_stride(primals_113, (4, ), (1, ))
assert_size_stride(primals_114, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_115, (4, ), (1, ))
assert_size_stride(primals_116, (4, ), (1, ))
assert_size_stride(primals_117, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_118, (4, ), (1, ))
assert_size_stride(primals_119, (4, ), (1, ))
assert_size_stride(primals_120, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_121, (4, ), (1, ))
assert_size_stride(primals_122, (4, ), (1, ))
assert_size_stride(primals_123, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_124, (4, ), (1, ))
assert_size_stride(primals_125, (4, ), (1, ))
assert_size_stride(primals_126, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_127, (4, ), (1, ))
assert_size_stride(primals_128, (4, ), (1, ))
assert_size_stride(primals_129, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_130, (4, ), (1, ))
assert_size_stride(primals_131, (4, ), (1, ))
assert_size_stride(primals_132, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_133, (4, ), (1, ))
assert_size_stride(primals_134, (4, ), (1, ))
assert_size_stride(primals_135, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_136, (4, ), (1, ))
assert_size_stride(primals_137, (4, ), (1, ))
assert_size_stride(primals_138, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_139, (4, ), (1, ))
assert_size_stride(primals_140, (4, ), (1, ))
assert_size_stride(primals_141, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_142, (4, ), (1, ))
assert_size_stride(primals_143, (4, ), (1, ))
assert_size_stride(primals_144, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_145, (4, ), (1, ))
assert_size_stride(primals_146, (4, ), (1, ))
assert_size_stride(primals_147, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_148, (16, ), (1, ))
assert_size_stride(primals_149, (16, ), (1, ))
assert_size_stride(primals_150, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_151, (4, ), (1, ))
assert_size_stride(primals_152, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_153, (4, ), (1, ))
assert_size_stride(primals_154, (4, ), (1, ))
assert_size_stride(primals_155, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_156, (4, ), (1, ))
assert_size_stride(primals_157, (4, ), (1, ))
assert_size_stride(primals_158, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_159, (4, ), (1, ))
assert_size_stride(primals_160, (4, ), (1, ))
assert_size_stride(primals_161, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_162, (4, ), (1, ))
assert_size_stride(primals_163, (4, ), (1, ))
assert_size_stride(primals_164, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_165, (4, ), (1, ))
assert_size_stride(primals_166, (4, ), (1, ))
assert_size_stride(primals_167, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_168, (4, ), (1, ))
assert_size_stride(primals_169, (4, ), (1, ))
assert_size_stride(primals_170, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_171, (4, ), (1, ))
assert_size_stride(primals_172, (4, ), (1, ))
assert_size_stride(primals_173, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_174, (4, ), (1, ))
assert_size_stride(primals_175, (4, ), (1, ))
assert_size_stride(primals_176, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_177, (4, ), (1, ))
assert_size_stride(primals_178, (4, ), (1, ))
assert_size_stride(primals_179, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_180, (4, ), (1, ))
assert_size_stride(primals_181, (4, ), (1, ))
assert_size_stride(primals_182, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_183, (4, ), (1, ))
assert_size_stride(primals_184, (4, ), (1, ))
assert_size_stride(primals_185, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_186, (4, ), (1, ))
assert_size_stride(primals_187, (4, ), (1, ))
assert_size_stride(primals_188, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_189, (4, ), (1, ))
assert_size_stride(primals_190, (4, ), (1, ))
assert_size_stride(primals_191, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_192, (4, ), (1, ))
assert_size_stride(primals_193, (4, ), (1, ))
assert_size_stride(primals_194, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_195, (4, ), (1, ))
assert_size_stride(primals_196, (4, ), (1, ))
assert_size_stride(primals_197, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_198, (16, ), (1, ))
assert_size_stride(primals_199, (16, ), (1, ))
assert_size_stride(primals_200, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_201, (4, ), (1, ))
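# Layout note: each (size, stride) pair above describes a contiguous NCHW
# tensor, e.g. shape (4, 96, 4, 4) with strides (96*4*4, 4*4, 4, 1) =
# (1536, 16, 4, 1), so these asserts check contiguity as well as shape.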
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 4, 4), (1536, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten._prelu_kernel]
stream0 = get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 6144, grid=grid(6144), stream=stream0)
del primals_2
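# Pattern repeated for every conv+PReLU pair below: the convolution is
# dispatched without its bias (bias=None above), then one fused Triton
# kernel adds the bias in place into the conv buffer (buf1 reuses buf0's
# storage) and writes the PReLU activation into a fresh buffer (buf2),
# after which the bias tensor can be dropped.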
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 96, 4, 4), (1536, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf4, primals_6, primals_7, buf5, 6144, grid=grid(6144), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 96, 4, 4), (1536, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, primals_9, 6144, grid=grid(6144), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf2, buf5, buf7, primals_10, buf8, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 96, 4, 4), (1536, 16, 4, 1))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf10, primals_12, primals_13, buf11, 6144, grid=grid(6144), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 96, 4, 4), (1536, 16, 4, 1))
buf13 = buf12; del buf12 # reuse
buf14 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_9, input_10], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf13, primals_15, primals_16, buf14, 6144, grid=grid(6144), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 96, 4, 4), (1536, 16, 4, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf16, primals_18, 6144, grid=grid(6144), stream=stream0)
del primals_18
buf17 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf11, buf14, buf16, primals_19, buf17, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1))
buf19 = buf18; del buf18 # reuse
buf20 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf19, primals_21, primals_22, buf20, 6144, grid=grid(6144), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 96, 4, 4), (1536, 16, 4, 1))
buf22 = buf21; del buf21 # reuse
buf23 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf22, primals_24, primals_25, buf23, 6144, grid=grid(6144), stream=stream0)
del primals_24
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 96, 4, 4), (1536, 16, 4, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf25, primals_27, 6144, grid=grid(6144), stream=stream0)
del primals_27
buf26 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf20, buf23, buf25, primals_28, buf26, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 96, 4, 4), (1536, 16, 4, 1))
buf28 = buf27; del buf27 # reuse
buf29 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_19, input_20], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf28, primals_30, primals_31, buf29, 6144, grid=grid(6144), stream=stream0)
del primals_30
# Topologically Sorted Source Nodes: [input_21], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 96, 4, 4), (1536, 16, 4, 1))
buf31 = buf30; del buf30 # reuse
buf32 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_21, input_22], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf31, primals_33, primals_34, buf32, 6144, grid=grid(6144), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 96, 4, 4), (1536, 16, 4, 1))
buf34 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf34, primals_36, 6144, grid=grid(6144), stream=stream0)
del primals_36
buf35 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf29, buf32, buf34, primals_37, buf35, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_25], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 96, 4, 4), (1536, 16, 4, 1))
buf37 = buf36; del buf36 # reuse
buf38 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_25, input_26], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf37, primals_39, primals_40, buf38, 6144, grid=grid(6144), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [input_27], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_41, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 96, 4, 4), (1536, 16, 4, 1))
buf40 = buf39; del buf39 # reuse
buf41 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_27, input_28], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_0.run(buf40, primals_42, primals_43, buf41, 6144, grid=grid(6144), stream=stream0)
del primals_42
# Topologically Sorted Source Nodes: [input_29], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 96, 4, 4), (1536, 16, 4, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [input_29], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf43, primals_45, 6144, grid=grid(6144), stream=stream0)
del primals_45
buf44 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf38, buf41, buf43, primals_46, buf44, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_31], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 8, 4, 4), (128, 16, 4, 1))
buf46 = buf45; del buf45 # reuse
buf47 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_31, input_32], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_3.run(buf46, primals_48, primals_49, buf47, 512, grid=grid(512), stream=stream0)
del primals_48
# Topologically Sorted Source Nodes: [input_33], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 4, 4, 4), (64, 16, 4, 1))
buf49 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(primals_3, buf48, primals_51, buf49, 512, grid=grid(512), stream=stream0)
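    # Skip connection (inferred from shapes): the two 1x1 convolutions above reduce
    # the 288-channel features to 8 and then 4 channels; triton_poi_fused_cat_4
    # adds the bias primals_51 to that projection and prepends the original
    # 4-channel input primals_3, producing the 8-channel tensor buf49.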
del primals_51
# Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
buf50 = extern_kernels.convolution(buf49, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 4, 4, 4), (64, 16, 4, 1))
buf51 = buf50; del buf50 # reuse
buf52 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf51, primals_53, primals_54, buf52, 256, grid=grid(256), stream=stream0)
del primals_53
# Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf52, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 4, 4, 4), (64, 16, 4, 1))
buf54 = buf53; del buf53 # reuse
buf55 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_36, input_37], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf54, primals_56, primals_57, buf55, 256, grid=grid(256), stream=stream0)
del primals_56
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4, 4, 4), (64, 16, 4, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [input_38], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf57, primals_59, 256, grid=grid(256), stream=stream0)
del primals_59
buf58 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf52, buf55, buf57, primals_60, buf58, 768, grid=grid(768), stream=stream0)
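    # Second stage (inferred): the same three-conv-plus-cat pattern now runs at
    # 4 channels per branch, with triton_poi_fused_cat_7 producing 12-channel
    # (3 x 4) concatenations instead of the 288-channel (3 x 96) ones above.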
# Topologically Sorted Source Nodes: [input_40], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 4, 4, 4), (64, 16, 4, 1))
buf60 = buf59; del buf59 # reuse
buf61 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_40, input_41], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf60, primals_62, primals_63, buf61, 256, grid=grid(256), stream=stream0)
del primals_62
# Topologically Sorted Source Nodes: [input_42], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf61, primals_64, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 4, 4, 4), (64, 16, 4, 1))
buf63 = buf62; del buf62 # reuse
buf64 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_42, input_43], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf63, primals_65, primals_66, buf64, 256, grid=grid(256), stream=stream0)
del primals_65
# Topologically Sorted Source Nodes: [input_44], Original ATen: [aten.convolution]
buf65 = extern_kernels.convolution(buf64, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 4, 4, 4), (64, 16, 4, 1))
buf66 = buf65; del buf65 # reuse
# Topologically Sorted Source Nodes: [input_44], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf66, primals_68, 256, grid=grid(256), stream=stream0)
del primals_68
buf67 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf61, buf64, buf66, primals_69, buf67, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_70, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 4, 4, 4), (64, 16, 4, 1))
buf69 = buf68; del buf68 # reuse
buf70 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf69, primals_71, primals_72, buf70, 256, grid=grid(256), stream=stream0)
del primals_71
# Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
buf71 = extern_kernels.convolution(buf70, primals_73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 4, 4, 4), (64, 16, 4, 1))
buf72 = buf71; del buf71 # reuse
buf73 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf72, primals_74, primals_75, buf73, 256, grid=grid(256), stream=stream0)
del primals_74
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf73, primals_76, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 4, 4, 4), (64, 16, 4, 1))
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf75, primals_77, 256, grid=grid(256), stream=stream0)
del primals_77
buf76 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf70, buf73, buf75, primals_78, buf76, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
buf77 = extern_kernels.convolution(buf76, primals_79, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 4, 4, 4), (64, 16, 4, 1))
buf78 = buf77; del buf77 # reuse
buf79 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf78, primals_80, primals_81, buf79, 256, grid=grid(256), stream=stream0)
del primals_80
# Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_82, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 4, 4, 4), (64, 16, 4, 1))
buf81 = buf80; del buf80 # reuse
buf82 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf81, primals_83, primals_84, buf82, 256, grid=grid(256), stream=stream0)
del primals_83
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_85, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 4, 4, 4), (64, 16, 4, 1))
buf84 = buf83; del buf83 # reuse
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf84, primals_86, 256, grid=grid(256), stream=stream0)
del primals_86
buf85 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf79, buf82, buf84, primals_87, buf85, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_88, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 4, 4, 4), (64, 16, 4, 1))
buf87 = buf86; del buf86 # reuse
buf88 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_58, input_59], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf87, primals_89, primals_90, buf88, 256, grid=grid(256), stream=stream0)
del primals_89
# Topologically Sorted Source Nodes: [input_60], Original ATen: [aten.convolution]
buf89 = extern_kernels.convolution(buf88, primals_91, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 4, 4, 4), (64, 16, 4, 1))
buf90 = buf89; del buf89 # reuse
buf91 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_60, input_61], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf90, primals_92, primals_93, buf91, 256, grid=grid(256), stream=stream0)
del primals_92
# Topologically Sorted Source Nodes: [input_62], Original ATen: [aten.convolution]
buf92 = extern_kernels.convolution(buf91, primals_94, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 4, 4, 4), (64, 16, 4, 1))
buf93 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [input_62], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf93, primals_95, 256, grid=grid(256), stream=stream0)
del primals_95
buf94 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf88, buf91, buf93, primals_96, buf94, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_64], Original ATen: [aten.convolution]
buf95 = extern_kernels.convolution(buf94, primals_97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 16, 4, 4), (256, 16, 4, 1))
buf96 = buf95; del buf95 # reuse
buf97 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_64, input_65], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_8.run(buf96, primals_98, primals_99, buf97, 1024, grid=grid(1024), stream=stream0)
del primals_98
# Topologically Sorted Source Nodes: [input_66], Original ATen: [aten.convolution]
buf98 = extern_kernels.convolution(buf97, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 4, 4, 4), (64, 16, 4, 1))
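    # Bottleneck mix (inferred): a 1x1 convolution (primals_97) expands the
    # 12-channel concat to 16 channels with PReLU, and a second 1x1 convolution
    # (primals_100) projects back down to 4 channels before the next skip concat.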
buf99 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(primals_3, buf98, primals_101, buf99, 512, grid=grid(512), stream=stream0)
del primals_101
# Topologically Sorted Source Nodes: [input_67], Original ATen: [aten.convolution]
buf100 = extern_kernels.convolution(buf99, primals_102, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf100, (4, 4, 4, 4), (64, 16, 4, 1))
buf101 = buf100; del buf100 # reuse
buf102 = buf98; del buf98 # reuse
# Topologically Sorted Source Nodes: [input_67, input_68], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf101, primals_103, primals_104, buf102, 256, grid=grid(256), stream=stream0)
del primals_103
# Topologically Sorted Source Nodes: [input_69], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf102, primals_105, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 4, 4, 4), (64, 16, 4, 1))
buf104 = buf103; del buf103 # reuse
buf105 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_69, input_70], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf104, primals_106, primals_107, buf105, 256, grid=grid(256), stream=stream0)
del primals_106
# Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
buf106 = extern_kernels.convolution(buf105, primals_108, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf106, (4, 4, 4, 4), (64, 16, 4, 1))
buf107 = buf106; del buf106 # reuse
# Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf107, primals_109, 256, grid=grid(256), stream=stream0)
del primals_109
buf108 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf102, buf105, buf107, primals_110, buf108, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_73], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, primals_111, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 4, 4, 4), (64, 16, 4, 1))
buf110 = buf109; del buf109 # reuse
buf111 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_73, input_74], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf110, primals_112, primals_113, buf111, 256, grid=grid(256), stream=stream0)
del primals_112
# Topologically Sorted Source Nodes: [input_75], Original ATen: [aten.convolution]
buf112 = extern_kernels.convolution(buf111, primals_114, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf112, (4, 4, 4, 4), (64, 16, 4, 1))
buf113 = buf112; del buf112 # reuse
buf114 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_75, input_76], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf113, primals_115, primals_116, buf114, 256, grid=grid(256), stream=stream0)
del primals_115
# Topologically Sorted Source Nodes: [input_77], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_117, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 4, 4, 4), (64, 16, 4, 1))
buf116 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [input_77], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf116, primals_118, 256, grid=grid(256), stream=stream0)
del primals_118
buf117 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf111, buf114, buf116, primals_119, buf117, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_79], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_120, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 4, 4, 4), (64, 16, 4, 1))
buf119 = buf118; del buf118 # reuse
buf120 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_79, input_80], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf119, primals_121, primals_122, buf120, 256, grid=grid(256), stream=stream0)
del primals_121
# Topologically Sorted Source Nodes: [input_81], Original ATen: [aten.convolution]
buf121 = extern_kernels.convolution(buf120, primals_123, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 4, 4), (64, 16, 4, 1))
buf122 = buf121; del buf121 # reuse
buf123 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_81, input_82], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf122, primals_124, primals_125, buf123, 256, grid=grid(256), stream=stream0)
del primals_124
# Topologically Sorted Source Nodes: [input_83], Original ATen: [aten.convolution]
buf124 = extern_kernels.convolution(buf123, primals_126, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 4, 4, 4), (64, 16, 4, 1))
buf125 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [input_83], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf125, primals_127, 256, grid=grid(256), stream=stream0)
del primals_127
buf126 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf120, buf123, buf125, primals_128, buf126, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_85], Original ATen: [aten.convolution]
buf127 = extern_kernels.convolution(buf126, primals_129, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf127, (4, 4, 4, 4), (64, 16, 4, 1))
buf128 = buf127; del buf127 # reuse
buf129 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_85, input_86], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf128, primals_130, primals_131, buf129, 256, grid=grid(256), stream=stream0)
del primals_130
# Topologically Sorted Source Nodes: [input_87], Original ATen: [aten.convolution]
buf130 = extern_kernels.convolution(buf129, primals_132, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 4, 4, 4), (64, 16, 4, 1))
buf131 = buf130; del buf130 # reuse
buf132 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_87, input_88], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf131, primals_133, primals_134, buf132, 256, grid=grid(256), stream=stream0)
del primals_133
# Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
buf133 = extern_kernels.convolution(buf132, primals_135, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 4, 4, 4), (64, 16, 4, 1))
buf134 = buf133; del buf133 # reuse
# Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf134, primals_136, 256, grid=grid(256), stream=stream0)
del primals_136
buf135 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf129, buf132, buf134, primals_137, buf135, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_91], Original ATen: [aten.convolution]
buf136 = extern_kernels.convolution(buf135, primals_138, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 4, 4, 4), (64, 16, 4, 1))
buf137 = buf136; del buf136 # reuse
buf138 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_91, input_92], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf137, primals_139, primals_140, buf138, 256, grid=grid(256), stream=stream0)
del primals_139
# Topologically Sorted Source Nodes: [input_93], Original ATen: [aten.convolution]
buf139 = extern_kernels.convolution(buf138, primals_141, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf139, (4, 4, 4, 4), (64, 16, 4, 1))
buf140 = buf139; del buf139 # reuse
buf141 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_93, input_94], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf140, primals_142, primals_143, buf141, 256, grid=grid(256), stream=stream0)
del primals_142
# Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf141, primals_144, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 4, 4, 4), (64, 16, 4, 1))
buf143 = buf142; del buf142 # reuse
# Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf143, primals_145, 256, grid=grid(256), stream=stream0)
del primals_145
buf144 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf138, buf141, buf143, primals_146, buf144, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_97], Original ATen: [aten.convolution]
buf145 = extern_kernels.convolution(buf144, primals_147, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 16, 4, 4), (256, 16, 4, 1))
buf146 = buf145; del buf145 # reuse
buf147 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_97, input_98], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_8.run(buf146, primals_148, primals_149, buf147, 1024, grid=grid(1024), stream=stream0)
del primals_148
# Topologically Sorted Source Nodes: [input_99], Original ATen: [aten.convolution]
buf148 = extern_kernels.convolution(buf147, primals_150, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf148, (4, 4, 4, 4), (64, 16, 4, 1))
buf149 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(primals_3, buf148, primals_151, buf149, 512, grid=grid(512), stream=stream0)
del primals_151
# Topologically Sorted Source Nodes: [input_100], Original ATen: [aten.convolution]
buf150 = extern_kernels.convolution(buf149, primals_152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf150, (4, 4, 4, 4), (64, 16, 4, 1))
buf151 = buf150; del buf150 # reuse
buf152 = buf148; del buf148 # reuse
# Topologically Sorted Source Nodes: [input_100, input_101], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf151, primals_153, primals_154, buf152, 256, grid=grid(256), stream=stream0)
del primals_153
# Topologically Sorted Source Nodes: [input_102], Original ATen: [aten.convolution]
buf153 = extern_kernels.convolution(buf152, primals_155, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 4, 4, 4), (64, 16, 4, 1))
buf154 = buf153; del buf153 # reuse
buf155 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_102, input_103], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf154, primals_156, primals_157, buf155, 256, grid=grid(256), stream=stream0)
del primals_156
# Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
buf156 = extern_kernels.convolution(buf155, primals_158, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf156, (4, 4, 4, 4), (64, 16, 4, 1))
buf157 = buf156; del buf156 # reuse
# Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf157, primals_159, 256, grid=grid(256), stream=stream0)
del primals_159
buf158 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf152, buf155, buf157, primals_160, buf158, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_106], Original ATen: [aten.convolution]
buf159 = extern_kernels.convolution(buf158, primals_161, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 4, 4, 4), (64, 16, 4, 1))
buf160 = buf159; del buf159 # reuse
buf161 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_106, input_107], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf160, primals_162, primals_163, buf161, 256, grid=grid(256), stream=stream0)
del primals_162
# Topologically Sorted Source Nodes: [input_108], Original ATen: [aten.convolution]
buf162 = extern_kernels.convolution(buf161, primals_164, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf162, (4, 4, 4, 4), (64, 16, 4, 1))
buf163 = buf162; del buf162 # reuse
buf164 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_108, input_109], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf163, primals_165, primals_166, buf164, 256, grid=grid(256), stream=stream0)
del primals_165
# Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf164, primals_167, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 4, 4, 4), (64, 16, 4, 1))
buf166 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf166, primals_168, 256, grid=grid(256), stream=stream0)
del primals_168
buf167 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf161, buf164, buf166, primals_169, buf167, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_112], Original ATen: [aten.convolution]
buf168 = extern_kernels.convolution(buf167, primals_170, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf168, (4, 4, 4, 4), (64, 16, 4, 1))
buf169 = buf168; del buf168 # reuse
buf170 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_112, input_113], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf169, primals_171, primals_172, buf170, 256, grid=grid(256), stream=stream0)
del primals_171
# Topologically Sorted Source Nodes: [input_114], Original ATen: [aten.convolution]
buf171 = extern_kernels.convolution(buf170, primals_173, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 4, 4, 4), (64, 16, 4, 1))
buf172 = buf171; del buf171 # reuse
buf173 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_114, input_115], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf172, primals_174, primals_175, buf173, 256, grid=grid(256), stream=stream0)
del primals_174
# Topologically Sorted Source Nodes: [input_116], Original ATen: [aten.convolution]
buf174 = extern_kernels.convolution(buf173, primals_176, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf174, (4, 4, 4, 4), (64, 16, 4, 1))
buf175 = buf174; del buf174 # reuse
# Topologically Sorted Source Nodes: [input_116], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf175, primals_177, 256, grid=grid(256), stream=stream0)
del primals_177
buf176 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf170, buf173, buf175, primals_178, buf176, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_118], Original ATen: [aten.convolution]
buf177 = extern_kernels.convolution(buf176, primals_179, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 4, 4, 4), (64, 16, 4, 1))
buf178 = buf177; del buf177 # reuse
buf179 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_118, input_119], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf178, primals_180, primals_181, buf179, 256, grid=grid(256), stream=stream0)
del primals_180
# Topologically Sorted Source Nodes: [input_120], Original ATen: [aten.convolution]
buf180 = extern_kernels.convolution(buf179, primals_182, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf180, (4, 4, 4, 4), (64, 16, 4, 1))
buf181 = buf180; del buf180 # reuse
buf182 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_120, input_121], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf181, primals_183, primals_184, buf182, 256, grid=grid(256), stream=stream0)
del primals_183
# Topologically Sorted Source Nodes: [input_122], Original ATen: [aten.convolution]
buf183 = extern_kernels.convolution(buf182, primals_185, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 4, 4, 4), (64, 16, 4, 1))
buf184 = buf183; del buf183 # reuse
# Topologically Sorted Source Nodes: [input_122], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf184, primals_186, 256, grid=grid(256), stream=stream0)
del primals_186
buf185 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf179, buf182, buf184, primals_187, buf185, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_124], Original ATen: [aten.convolution]
buf186 = extern_kernels.convolution(buf185, primals_188, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf186, (4, 4, 4, 4), (64, 16, 4, 1))
buf187 = buf186; del buf186 # reuse
buf188 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_124, input_125], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf187, primals_189, primals_190, buf188, 256, grid=grid(256), stream=stream0)
del primals_189
# Topologically Sorted Source Nodes: [input_126], Original ATen: [aten.convolution]
buf189 = extern_kernels.convolution(buf188, primals_191, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf189, (4, 4, 4, 4), (64, 16, 4, 1))
buf190 = buf189; del buf189 # reuse
buf191 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_126, input_127], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_5.run(buf190, primals_192, primals_193, buf191, 256, grid=grid(256), stream=stream0)
del primals_192
# Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
buf192 = extern_kernels.convolution(buf191, primals_194, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf192, (4, 4, 4, 4), (64, 16, 4, 1))
buf193 = buf192; del buf192 # reuse
# Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf193, primals_195, 256, grid=grid(256), stream=stream0)
del primals_195
buf194 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf188, buf191, buf193, primals_196, buf194, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [input_130], Original ATen: [aten.convolution]
buf195 = extern_kernels.convolution(buf194, primals_197, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf195, (4, 16, 4, 4), (256, 16, 4, 1))
buf196 = buf195; del buf195 # reuse
buf197 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_130, input_131], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_8.run(buf196, primals_198, primals_199, buf197, 1024, grid=grid(1024), stream=stream0)
del primals_198
# Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
buf198 = extern_kernels.convolution(buf197, primals_200, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf198, (4, 4, 4, 4), (64, 16, 4, 1))
buf199 = buf198; del buf198 # reuse
# Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf199, primals_201, 256, grid=grid(256), stream=stream0)
del primals_201
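    # buf199 (the final 4-channel map) is the module output; the remaining
    # primals/buffers in the tuple below appear to be the weights and activations
    # that autograd keeps alive for the backward pass.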
return (buf199, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, primals_14, primals_16, primals_17, primals_19, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, primals_29, primals_31, primals_32, primals_34, primals_35, primals_37, primals_38, primals_40, primals_41, primals_43, primals_44, primals_46, primals_47, primals_49, primals_50, primals_52, primals_54, primals_55, primals_57, primals_58, primals_60, primals_61, primals_63, primals_64, primals_66, primals_67, primals_69, primals_70, primals_72, primals_73, primals_75, primals_76, primals_78, primals_79, primals_81, primals_82, primals_84, primals_85, primals_87, primals_88, primals_90, primals_91, primals_93, primals_94, primals_96, primals_97, primals_99, primals_100, primals_102, primals_104, primals_105, primals_107, primals_108, primals_110, primals_111, primals_113, primals_114, primals_116, primals_117, primals_119, primals_120, primals_122, primals_123, primals_125, primals_126, primals_128, primals_129, primals_131, primals_132, primals_134, primals_135, primals_137, primals_138, primals_140, primals_141, primals_143, primals_144, primals_146, primals_147, primals_149, primals_150, primals_152, primals_154, primals_155, primals_157, primals_158, primals_160, primals_161, primals_163, primals_164, primals_166, primals_167, primals_169, primals_170, primals_172, primals_173, primals_175, primals_176, primals_178, primals_179, primals_181, primals_182, primals_184, primals_185, primals_187, primals_188, primals_190, primals_191, primals_193, primals_194, primals_196, primals_197, primals_199, primals_200, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46, buf47, buf49, buf51, buf52, buf54, buf55, buf57, buf58, buf60, buf61, buf63, buf64, buf66, buf67, buf69, buf70, buf72, buf73, buf75, buf76, buf78, buf79, buf81, buf82, buf84, buf85, buf87, buf88, buf90, buf91, buf93, buf94, buf96, buf97, buf99, buf101, buf102, buf104, buf105, buf107, buf108, buf110, buf111, buf113, buf114, buf116, buf117, buf119, buf120, buf122, buf123, buf125, buf126, buf128, buf129, buf131, buf132, buf134, buf135, buf137, buf138, buf140, buf141, buf143, buf144, buf146, buf147, buf149, buf151, buf152, buf154, buf155, buf157, buf158, buf160, buf161, buf163, buf164, buf166, buf167, buf169, buf170, buf172, buf173, buf175, buf176, buf178, buf179, buf181, buf182, buf184, buf185, buf187, buf188, buf190, buf191, buf193, buf194, buf196, buf197, )
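

# Eager-mode reference for one fused step (a sketch for spot-checking, not part
# of the generated graph): each extern_kernels.convolution +
# triton_poi_fused__prelu_kernel_convolution_* pair in call() computes a
# convolution followed by a bias add and a per-channel PReLU, which in plain
# PyTorch is:
def _eager_conv_prelu_reference(x, weight, bias, prelu_weight):
    # 3x3 convolution with padding=1, matching the extern_kernels.convolution
    # calls in call() (the 1x1 reduction/expansion stages use padding=0 instead)
    y = torch.nn.functional.conv2d(x, weight, bias, stride=1, padding=1)
    # PReLU: y where y > 0, prelu_weight * y elsewhere, broadcast per channel
    return torch.nn.functional.prelu(y, prelu_weight)
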
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
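    # rand_strided(size, stride, device=..., dtype=...) fills a tensor with random
    # data while forcing exactly the strides recorded at trace time, so the
    # benchmark exercises the same memory layout the kernels were compiled for.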
primals_1 = rand_strided((96, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((96, 288, 3, 3), (2592, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((8, 288, 1, 1), (288, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
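    # primals_47..primals_51 are the 1x1 weights/biases of the 288 -> 8 -> 4
    # reduction feeding the first skip concat in call() (inferred from shapes).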
primals_52 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((16, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((16, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_171 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_172 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_173 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_174 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_175 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_176 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_177 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_178 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_179 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_180 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_181 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_182 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_183 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_184 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_185 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_186 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_187 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_188 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_189 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_190 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_191 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_192 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_193 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_194 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_195 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_196 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_197 = rand_strided((16, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_198 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_199 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_200 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_201 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
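    # Zero-argument closure over the randomly initialized parameters above, so
    # print_performance can time repeated executions of the compiled graph.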
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
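# Eager-mode PyTorch source for the compiled code in this file: a multi-stage
# refinement network ("L2" branch) in which each stage chains five three-way
# concat blocks followed by two pointwise (1x1) convolutions.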
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super(concatLayer, self).__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super(stage, self).__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
        conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
class L2Part(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super(L2Part, self).__init__()
self.firstStage = stage(0, in_channels, 96, in_channels * 2,
stage_out_channels, 'L2')
self.secondStage = stage(1, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.thirdStage = stage(2, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.fourthStage = stage(3, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
def forward(self, features):
x = self.firstStage(features)
x = torch.cat([features, x], 1)
x = self.secondStage(x)
x = torch.cat([features, x], 1)
x = self.thirdStage(x)
x = torch.cat([features, x], 1)
out = self.fourthStage(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'stage_out_channels': 4}]
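# A minimal usage sketch for the modules above (illustrative only; shapes
# follow get_inputs()/get_init_inputs(), and the output keeps the input's
# spatial size while mapping to stage_out_channels):
#
#   model = L2Part(in_channels=4, stage_out_channels=4)
#   out = model(torch.rand([4, 4, 4, 4]))
#   assert out.shape == (4, 4, 4, 4)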
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
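# Fused bias-add + PReLU for the 96-channel sub-block convolutions: adds the
# per-channel bias (in_ptr0) in place and writes prelu(x) = x if x > 0 else
# weight * x (per-channel weight in in_ptr1) to out_ptr0. The discarded
# tl.full([XBLOCK], True, tl.int1) is the always-true mask Inductor emits when
# xnumel exactly covers the launch grid.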
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp7, None)
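# In-place per-channel bias add for the third sub-block's convolution; its
# PReLU is deferred into the concat kernel below.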
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
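# Channel concat for concatLayer.forward at 96 channels per branch: channels
# 0-95 and 96-191 are copied as-is (already activated), while channels 192-287
# get their deferred PReLU (per-channel weight in in_ptr3) applied inline.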
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 288
x0 = xindex % 16
x2 = xindex // 4608
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1536 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 192, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-96 + x1) + 1536 * x2), tmp9,
other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 288, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-192 + x1) + 1536 * x2), tmp11,
other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + (-192 + x1), tmp11, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x3, tmp23, None)
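# Masked variant of the fused bias-add + PReLU for the 8-channel Mconv6 output
# of the first stage (4x8x4x4 = 512 elements).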
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_3(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
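# Implements torch.cat([features, x], 1) from L2Part.forward: channels 0-3
# copy the original input features, channels 4-7 take the stage's final 1x1
# convolution output with its bias (in_ptr2) added inline.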
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
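# Fused bias-add + PReLU for the 4-channel sub-block convolutions of the
# later stages (4x4x4x4 = 256 elements).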
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_5(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
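# In-place bias add for the third 4-channel sub-block; as above, the PReLU is
# folded into the concat kernel that follows.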
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
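# Channel concat at 4 channels per branch (12 output channels); the third
# branch's PReLU is applied inline via the weights in in_ptr3.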
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp11 &
xmask, other=0.0)
tmp15 = 0.0
tmp16 = tmp14 > tmp15
tmp17 = tl.load(in_ptr3 + (-8 + x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp17 * tmp14
tmp19 = tl.where(tmp16, tmp14, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp10, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x3, tmp23, xmask)
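# Fused bias-add + PReLU for the 16-channel Mconv6 outputs of stages 1-3
# (4x16x4x4 = 1024 elements).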
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_8(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
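# Drives the full L2Part forward pass: the 2-D convolutions run through ATen
# (extern_kernels.convolution with bias=None, typically cuDNN), while every
# bias add, PReLU, and channel concat is fused into the Triton kernels above;
# intermediate buffers are deleted as soon as their last consumer has run.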
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185, primals_186, primals_187,
primals_188, primals_189, primals_190, primals_191, primals_192,
primals_193, primals_194, primals_195, primals_196, primals_197,
primals_198, primals_199, primals_200, primals_201) = args
args.clear()
assert_size_stride(primals_1, (96, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (96,), (1,))
assert_size_stride(primals_5, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_6, (96,), (1,))
assert_size_stride(primals_7, (96,), (1,))
assert_size_stride(primals_8, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_9, (96,), (1,))
assert_size_stride(primals_10, (96,), (1,))
assert_size_stride(primals_11, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_12, (96,), (1,))
assert_size_stride(primals_13, (96,), (1,))
assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_15, (96,), (1,))
assert_size_stride(primals_16, (96,), (1,))
assert_size_stride(primals_17, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_18, (96,), (1,))
assert_size_stride(primals_19, (96,), (1,))
assert_size_stride(primals_20, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_21, (96,), (1,))
assert_size_stride(primals_22, (96,), (1,))
assert_size_stride(primals_23, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_24, (96,), (1,))
assert_size_stride(primals_25, (96,), (1,))
assert_size_stride(primals_26, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_27, (96,), (1,))
assert_size_stride(primals_28, (96,), (1,))
assert_size_stride(primals_29, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_30, (96,), (1,))
assert_size_stride(primals_31, (96,), (1,))
assert_size_stride(primals_32, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_33, (96,), (1,))
assert_size_stride(primals_34, (96,), (1,))
assert_size_stride(primals_35, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_36, (96,), (1,))
assert_size_stride(primals_37, (96,), (1,))
assert_size_stride(primals_38, (96, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_39, (96,), (1,))
assert_size_stride(primals_40, (96,), (1,))
assert_size_stride(primals_41, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_42, (96,), (1,))
assert_size_stride(primals_43, (96,), (1,))
assert_size_stride(primals_44, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_45, (96,), (1,))
assert_size_stride(primals_46, (96,), (1,))
assert_size_stride(primals_47, (8, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_48, (8,), (1,))
assert_size_stride(primals_49, (8,), (1,))
assert_size_stride(primals_50, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_51, (4,), (1,))
assert_size_stride(primals_52, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_53, (4,), (1,))
assert_size_stride(primals_54, (4,), (1,))
assert_size_stride(primals_55, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_56, (4,), (1,))
assert_size_stride(primals_57, (4,), (1,))
assert_size_stride(primals_58, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_59, (4,), (1,))
assert_size_stride(primals_60, (4,), (1,))
assert_size_stride(primals_61, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_62, (4,), (1,))
assert_size_stride(primals_63, (4,), (1,))
assert_size_stride(primals_64, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_65, (4,), (1,))
assert_size_stride(primals_66, (4,), (1,))
assert_size_stride(primals_67, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_68, (4,), (1,))
assert_size_stride(primals_69, (4,), (1,))
assert_size_stride(primals_70, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_71, (4,), (1,))
assert_size_stride(primals_72, (4,), (1,))
assert_size_stride(primals_73, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_74, (4,), (1,))
assert_size_stride(primals_75, (4,), (1,))
assert_size_stride(primals_76, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_77, (4,), (1,))
assert_size_stride(primals_78, (4,), (1,))
assert_size_stride(primals_79, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_80, (4,), (1,))
assert_size_stride(primals_81, (4,), (1,))
assert_size_stride(primals_82, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_83, (4,), (1,))
assert_size_stride(primals_84, (4,), (1,))
assert_size_stride(primals_85, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_86, (4,), (1,))
assert_size_stride(primals_87, (4,), (1,))
assert_size_stride(primals_88, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_89, (4,), (1,))
assert_size_stride(primals_90, (4,), (1,))
assert_size_stride(primals_91, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_92, (4,), (1,))
assert_size_stride(primals_93, (4,), (1,))
assert_size_stride(primals_94, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_95, (4,), (1,))
assert_size_stride(primals_96, (4,), (1,))
assert_size_stride(primals_97, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_98, (16,), (1,))
assert_size_stride(primals_99, (16,), (1,))
assert_size_stride(primals_100, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_101, (4,), (1,))
assert_size_stride(primals_102, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_103, (4,), (1,))
assert_size_stride(primals_104, (4,), (1,))
assert_size_stride(primals_105, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_106, (4,), (1,))
assert_size_stride(primals_107, (4,), (1,))
assert_size_stride(primals_108, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_109, (4,), (1,))
assert_size_stride(primals_110, (4,), (1,))
assert_size_stride(primals_111, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_112, (4,), (1,))
assert_size_stride(primals_113, (4,), (1,))
assert_size_stride(primals_114, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_115, (4,), (1,))
assert_size_stride(primals_116, (4,), (1,))
assert_size_stride(primals_117, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_118, (4,), (1,))
assert_size_stride(primals_119, (4,), (1,))
assert_size_stride(primals_120, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_121, (4,), (1,))
assert_size_stride(primals_122, (4,), (1,))
assert_size_stride(primals_123, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_124, (4,), (1,))
assert_size_stride(primals_125, (4,), (1,))
assert_size_stride(primals_126, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_127, (4,), (1,))
assert_size_stride(primals_128, (4,), (1,))
assert_size_stride(primals_129, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_130, (4,), (1,))
assert_size_stride(primals_131, (4,), (1,))
assert_size_stride(primals_132, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_133, (4,), (1,))
assert_size_stride(primals_134, (4,), (1,))
assert_size_stride(primals_135, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_136, (4,), (1,))
assert_size_stride(primals_137, (4,), (1,))
assert_size_stride(primals_138, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_139, (4,), (1,))
assert_size_stride(primals_140, (4,), (1,))
assert_size_stride(primals_141, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_142, (4,), (1,))
assert_size_stride(primals_143, (4,), (1,))
assert_size_stride(primals_144, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_145, (4,), (1,))
assert_size_stride(primals_146, (4,), (1,))
assert_size_stride(primals_147, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_148, (16,), (1,))
assert_size_stride(primals_149, (16,), (1,))
assert_size_stride(primals_150, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_151, (4,), (1,))
assert_size_stride(primals_152, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_153, (4,), (1,))
assert_size_stride(primals_154, (4,), (1,))
assert_size_stride(primals_155, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_156, (4,), (1,))
assert_size_stride(primals_157, (4,), (1,))
assert_size_stride(primals_158, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_159, (4,), (1,))
assert_size_stride(primals_160, (4,), (1,))
assert_size_stride(primals_161, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_162, (4,), (1,))
assert_size_stride(primals_163, (4,), (1,))
assert_size_stride(primals_164, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_165, (4,), (1,))
assert_size_stride(primals_166, (4,), (1,))
assert_size_stride(primals_167, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_168, (4,), (1,))
assert_size_stride(primals_169, (4,), (1,))
assert_size_stride(primals_170, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_171, (4,), (1,))
assert_size_stride(primals_172, (4,), (1,))
assert_size_stride(primals_173, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_174, (4,), (1,))
assert_size_stride(primals_175, (4,), (1,))
assert_size_stride(primals_176, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_177, (4,), (1,))
assert_size_stride(primals_178, (4,), (1,))
assert_size_stride(primals_179, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_180, (4,), (1,))
assert_size_stride(primals_181, (4,), (1,))
assert_size_stride(primals_182, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_183, (4,), (1,))
assert_size_stride(primals_184, (4,), (1,))
assert_size_stride(primals_185, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_186, (4,), (1,))
assert_size_stride(primals_187, (4,), (1,))
assert_size_stride(primals_188, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_189, (4,), (1,))
assert_size_stride(primals_190, (4,), (1,))
assert_size_stride(primals_191, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_192, (4,), (1,))
assert_size_stride(primals_193, (4,), (1,))
assert_size_stride(primals_194, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_195, (4,), (1,))
assert_size_stride(primals_196, (4,), (1,))
assert_size_stride(primals_197, (16, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_198, (16,), (1,))
assert_size_stride(primals_199, (16,), (1,))
assert_size_stride(primals_200, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_201, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 4, 4), (1536, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf1,
primals_2, primals_4, buf2, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 96, 4, 4), (1536, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf4,
primals_6, primals_7, buf5, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_6
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 96, 4, 4), (1536, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(6144)](buf7, primals_9, 6144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(18432)](buf2, buf5, buf7, primals_10,
buf8, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 96, 4, 4), (1536, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf10,
primals_12, primals_13, buf11, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_12
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 96, 4, 4), (1536, 16, 4, 1))
buf13 = buf12
del buf12
buf14 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf13,
primals_15, primals_16, buf14, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_15
buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 96, 4, 4), (1536, 16, 4, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_1[grid(6144)](buf16, primals_18, 6144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_18
buf17 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(18432)](buf11, buf14, buf16, primals_19,
buf17, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1))
buf19 = buf18
del buf18
buf20 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf19,
primals_21, primals_22, buf20, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_21
buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 96, 4, 4), (1536, 16, 4, 1))
buf22 = buf21
del buf21
buf23 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf22,
primals_24, primals_25, buf23, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_24
buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 96, 4, 4), (1536, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_1[grid(6144)](buf25, primals_27, 6144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_27
buf26 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(18432)](buf20, buf23, buf25, primals_28,
buf26, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf26, primals_29, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 96, 4, 4), (1536, 16, 4, 1))
buf28 = buf27
del buf27
buf29 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf28,
primals_30, primals_31, buf29, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_30
buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 96, 4, 4), (1536, 16, 4, 1))
buf31 = buf30
del buf30
buf32 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf31,
primals_33, primals_34, buf32, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_33
buf33 = extern_kernels.convolution(buf32, primals_35, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 96, 4, 4), (1536, 16, 4, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_1[grid(6144)](buf34, primals_36, 6144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_36
buf35 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(18432)](buf29, buf32, buf34, primals_37,
buf35, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 96, 4, 4), (1536, 16, 4, 1))
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf37,
primals_39, primals_40, buf38, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_39
buf39 = extern_kernels.convolution(buf38, primals_41, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 96, 4, 4), (1536, 16, 4, 1))
buf40 = buf39
del buf39
buf41 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_0[grid(6144)](buf40,
primals_42, primals_43, buf41, 6144, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_42
buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 96, 4, 4), (1536, 16, 4, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_1[grid(6144)](buf43, primals_45, 6144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_45
buf44 = empty_strided_cuda((4, 288, 4, 4), (4608, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(18432)](buf38, buf41, buf43, primals_46,
buf44, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf45 = extern_kernels.convolution(buf44, primals_47, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 8, 4, 4), (128, 16, 4, 1))
buf46 = buf45
del buf45
buf47 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_3[grid(512)](buf46,
primals_48, primals_49, buf47, 512, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_48
buf48 = extern_kernels.convolution(buf47, primals_50, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 4, 4, 4), (64, 16, 4, 1))
buf49 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_4[grid(512)](primals_3, buf48, primals_51,
buf49, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf50 = extern_kernels.convolution(buf49, primals_52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 4, 4, 4), (64, 16, 4, 1))
buf51 = buf50
del buf50
buf52 = buf48
del buf48
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf51,
primals_53, primals_54, buf52, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_53
buf53 = extern_kernels.convolution(buf52, primals_55, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 4, 4, 4), (64, 16, 4, 1))
buf54 = buf53
del buf53
buf55 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf54,
primals_56, primals_57, buf55, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_56
buf56 = extern_kernels.convolution(buf55, primals_58, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4, 4, 4), (64, 16, 4, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_6[grid(256)](buf57, primals_59, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_59
buf58 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf52, buf55, buf57, primals_60,
buf58, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf59 = extern_kernels.convolution(buf58, primals_61, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 4, 4, 4), (64, 16, 4, 1))
buf60 = buf59
del buf59
buf61 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf60,
primals_62, primals_63, buf61, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_62
buf62 = extern_kernels.convolution(buf61, primals_64, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 4, 4, 4), (64, 16, 4, 1))
buf63 = buf62
del buf62
buf64 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf63,
primals_65, primals_66, buf64, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_65
buf65 = extern_kernels.convolution(buf64, primals_67, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 4, 4, 4), (64, 16, 4, 1))
buf66 = buf65
del buf65
triton_poi_fused_convolution_6[grid(256)](buf66, primals_68, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_68
buf67 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf61, buf64, buf66, primals_69,
buf67, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf68 = extern_kernels.convolution(buf67, primals_70, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 4, 4, 4), (64, 16, 4, 1))
buf69 = buf68
del buf68
buf70 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf69,
primals_71, primals_72, buf70, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_71
buf71 = extern_kernels.convolution(buf70, primals_73, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 4, 4, 4), (64, 16, 4, 1))
buf72 = buf71
del buf71
buf73 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf72,
primals_74, primals_75, buf73, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_74
buf74 = extern_kernels.convolution(buf73, primals_76, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 4, 4, 4), (64, 16, 4, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_6[grid(256)](buf75, primals_77, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_77
buf76 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf70, buf73, buf75, primals_78,
buf76, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf77 = extern_kernels.convolution(buf76, primals_79, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 4, 4, 4), (64, 16, 4, 1))
buf78 = buf77
del buf77
buf79 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf78,
primals_80, primals_81, buf79, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_80
buf80 = extern_kernels.convolution(buf79, primals_82, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 4, 4, 4), (64, 16, 4, 1))
buf81 = buf80
del buf80
buf82 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf81,
primals_83, primals_84, buf82, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_83
buf83 = extern_kernels.convolution(buf82, primals_85, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 4, 4, 4), (64, 16, 4, 1))
buf84 = buf83
del buf83
triton_poi_fused_convolution_6[grid(256)](buf84, primals_86, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_86
buf85 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf79, buf82, buf84, primals_87,
buf85, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf86 = extern_kernels.convolution(buf85, primals_88, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 4, 4, 4), (64, 16, 4, 1))
buf87 = buf86
del buf86
buf88 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf87,
primals_89, primals_90, buf88, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_89
buf89 = extern_kernels.convolution(buf88, primals_91, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 4, 4, 4), (64, 16, 4, 1))
buf90 = buf89
del buf89
buf91 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf90,
primals_92, primals_93, buf91, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_92
buf92 = extern_kernels.convolution(buf91, primals_94, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 4, 4, 4), (64, 16, 4, 1))
buf93 = buf92
del buf92
triton_poi_fused_convolution_6[grid(256)](buf93, primals_95, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_95
buf94 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf88, buf91, buf93, primals_96,
buf94, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf95 = extern_kernels.convolution(buf94, primals_97, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 16, 4, 4), (256, 16, 4, 1))
buf96 = buf95
del buf95
buf97 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf96,
primals_98, primals_99, buf97, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_98
buf98 = extern_kernels.convolution(buf97, primals_100, stride=(1, 1
), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 4, 4, 4), (64, 16, 4, 1))
buf99 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_4[grid(512)](primals_3, buf98, primals_101,
buf99, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_101
buf100 = extern_kernels.convolution(buf99, primals_102, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf100, (4, 4, 4, 4), (64, 16, 4, 1))
buf101 = buf100
del buf100
buf102 = buf98
del buf98
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf101,
primals_103, primals_104, buf102, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_103
buf103 = extern_kernels.convolution(buf102, primals_105, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 4, 4, 4), (64, 16, 4, 1))
buf104 = buf103
del buf103
buf105 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf104,
primals_106, primals_107, buf105, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_106
buf106 = extern_kernels.convolution(buf105, primals_108, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf106, (4, 4, 4, 4), (64, 16, 4, 1))
buf107 = buf106
del buf106
triton_poi_fused_convolution_6[grid(256)](buf107, primals_109, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_109
buf108 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf102, buf105, buf107,
primals_110, buf108, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf109 = extern_kernels.convolution(buf108, primals_111, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 4, 4, 4), (64, 16, 4, 1))
buf110 = buf109
del buf109
buf111 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf110,
primals_112, primals_113, buf111, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_112
buf112 = extern_kernels.convolution(buf111, primals_114, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf112, (4, 4, 4, 4), (64, 16, 4, 1))
buf113 = buf112
del buf112
buf114 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf113,
primals_115, primals_116, buf114, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_115
buf115 = extern_kernels.convolution(buf114, primals_117, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 4, 4, 4), (64, 16, 4, 1))
buf116 = buf115
del buf115
triton_poi_fused_convolution_6[grid(256)](buf116, primals_118, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_118
buf117 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf111, buf114, buf116,
primals_119, buf117, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf118 = extern_kernels.convolution(buf117, primals_120, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 4, 4, 4), (64, 16, 4, 1))
buf119 = buf118
del buf118
buf120 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf119,
primals_121, primals_122, buf120, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_121
buf121 = extern_kernels.convolution(buf120, primals_123, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 4, 4), (64, 16, 4, 1))
buf122 = buf121
del buf121
buf123 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf122,
primals_124, primals_125, buf123, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_124
buf124 = extern_kernels.convolution(buf123, primals_126, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 4, 4, 4), (64, 16, 4, 1))
buf125 = buf124
del buf124
triton_poi_fused_convolution_6[grid(256)](buf125, primals_127, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_127
buf126 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf120, buf123, buf125,
primals_128, buf126, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf127 = extern_kernels.convolution(buf126, primals_129, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf127, (4, 4, 4, 4), (64, 16, 4, 1))
buf128 = buf127
del buf127
buf129 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf128,
primals_130, primals_131, buf129, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_130
buf130 = extern_kernels.convolution(buf129, primals_132, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 4, 4, 4), (64, 16, 4, 1))
buf131 = buf130
del buf130
buf132 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf131,
primals_133, primals_134, buf132, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_133
buf133 = extern_kernels.convolution(buf132, primals_135, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 4, 4, 4), (64, 16, 4, 1))
buf134 = buf133
del buf133
triton_poi_fused_convolution_6[grid(256)](buf134, primals_136, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_136
buf135 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf129, buf132, buf134,
primals_137, buf135, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf136 = extern_kernels.convolution(buf135, primals_138, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 4, 4, 4), (64, 16, 4, 1))
buf137 = buf136
del buf136
buf138 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf137,
primals_139, primals_140, buf138, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_139
buf139 = extern_kernels.convolution(buf138, primals_141, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf139, (4, 4, 4, 4), (64, 16, 4, 1))
buf140 = buf139
del buf139
buf141 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf140,
primals_142, primals_143, buf141, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_142
buf142 = extern_kernels.convolution(buf141, primals_144, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 4, 4, 4), (64, 16, 4, 1))
buf143 = buf142
del buf142
triton_poi_fused_convolution_6[grid(256)](buf143, primals_145, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_145
buf144 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf138, buf141, buf143,
primals_146, buf144, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf145 = extern_kernels.convolution(buf144, primals_147, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 16, 4, 4), (256, 16, 4, 1))
buf146 = buf145
del buf145
buf147 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf146,
primals_148, primals_149, buf147, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_148
buf148 = extern_kernels.convolution(buf147, primals_150, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf148, (4, 4, 4, 4), (64, 16, 4, 1))
buf149 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_4[grid(512)](primals_3, buf148, primals_151,
buf149, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_151
buf150 = extern_kernels.convolution(buf149, primals_152, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf150, (4, 4, 4, 4), (64, 16, 4, 1))
buf151 = buf150
del buf150
buf152 = buf148
del buf148
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf151,
primals_153, primals_154, buf152, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_153
buf153 = extern_kernels.convolution(buf152, primals_155, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 4, 4, 4), (64, 16, 4, 1))
buf154 = buf153
del buf153
buf155 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf154,
primals_156, primals_157, buf155, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_156
buf156 = extern_kernels.convolution(buf155, primals_158, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf156, (4, 4, 4, 4), (64, 16, 4, 1))
buf157 = buf156
del buf156
triton_poi_fused_convolution_6[grid(256)](buf157, primals_159, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_159
buf158 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf152, buf155, buf157,
primals_160, buf158, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf159 = extern_kernels.convolution(buf158, primals_161, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 4, 4, 4), (64, 16, 4, 1))
buf160 = buf159
del buf159
buf161 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf160,
primals_162, primals_163, buf161, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_162
buf162 = extern_kernels.convolution(buf161, primals_164, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf162, (4, 4, 4, 4), (64, 16, 4, 1))
buf163 = buf162
del buf162
buf164 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf163,
primals_165, primals_166, buf164, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_165
buf165 = extern_kernels.convolution(buf164, primals_167, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 4, 4, 4), (64, 16, 4, 1))
buf166 = buf165
del buf165
triton_poi_fused_convolution_6[grid(256)](buf166, primals_168, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_168
buf167 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf161, buf164, buf166,
primals_169, buf167, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf168 = extern_kernels.convolution(buf167, primals_170, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf168, (4, 4, 4, 4), (64, 16, 4, 1))
buf169 = buf168
del buf168
buf170 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf169,
primals_171, primals_172, buf170, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_171
buf171 = extern_kernels.convolution(buf170, primals_173, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 4, 4, 4), (64, 16, 4, 1))
buf172 = buf171
del buf171
buf173 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf172,
primals_174, primals_175, buf173, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_174
buf174 = extern_kernels.convolution(buf173, primals_176, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf174, (4, 4, 4, 4), (64, 16, 4, 1))
buf175 = buf174
del buf174
triton_poi_fused_convolution_6[grid(256)](buf175, primals_177, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_177
buf176 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf170, buf173, buf175,
primals_178, buf176, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf177 = extern_kernels.convolution(buf176, primals_179, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 4, 4, 4), (64, 16, 4, 1))
buf178 = buf177
del buf177
buf179 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf178,
primals_180, primals_181, buf179, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_180
buf180 = extern_kernels.convolution(buf179, primals_182, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf180, (4, 4, 4, 4), (64, 16, 4, 1))
buf181 = buf180
del buf180
buf182 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf181,
primals_183, primals_184, buf182, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_183
buf183 = extern_kernels.convolution(buf182, primals_185, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 4, 4, 4), (64, 16, 4, 1))
buf184 = buf183
del buf183
triton_poi_fused_convolution_6[grid(256)](buf184, primals_186, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_186
buf185 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf179, buf182, buf184,
primals_187, buf185, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf186 = extern_kernels.convolution(buf185, primals_188, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf186, (4, 4, 4, 4), (64, 16, 4, 1))
buf187 = buf186
del buf186
buf188 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf187,
primals_189, primals_190, buf188, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_189
buf189 = extern_kernels.convolution(buf188, primals_191, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf189, (4, 4, 4, 4), (64, 16, 4, 1))
buf190 = buf189
del buf189
buf191 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused__prelu_kernel_convolution_5[grid(256)](buf190,
primals_192, primals_193, buf191, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_192
buf192 = extern_kernels.convolution(buf191, primals_194, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf192, (4, 4, 4, 4), (64, 16, 4, 1))
buf193 = buf192
del buf192
triton_poi_fused_convolution_6[grid(256)](buf193, primals_195, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_195
buf194 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_7[grid(768)](buf188, buf191, buf193,
primals_196, buf194, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf195 = extern_kernels.convolution(buf194, primals_197, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf195, (4, 16, 4, 4), (256, 16, 4, 1))
buf196 = buf195
del buf195
buf197 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused__prelu_kernel_convolution_8[grid(1024)](buf196,
primals_198, primals_199, buf197, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_198
buf198 = extern_kernels.convolution(buf197, primals_200, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf198, (4, 4, 4, 4), (64, 16, 4, 1))
buf199 = buf198
del buf198
triton_poi_fused_convolution_6[grid(256)](buf199, primals_201, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_201
return (buf199, primals_1, primals_3, primals_4, primals_5, primals_7,
primals_8, primals_10, primals_11, primals_13, primals_14,
primals_16, primals_17, primals_19, primals_20, primals_22,
primals_23, primals_25, primals_26, primals_28, primals_29,
primals_31, primals_32, primals_34, primals_35, primals_37,
primals_38, primals_40, primals_41, primals_43, primals_44,
primals_46, primals_47, primals_49, primals_50, primals_52,
primals_54, primals_55, primals_57, primals_58, primals_60,
primals_61, primals_63, primals_64, primals_66, primals_67,
primals_69, primals_70, primals_72, primals_73, primals_75,
primals_76, primals_78, primals_79, primals_81, primals_82,
primals_84, primals_85, primals_87, primals_88, primals_90,
primals_91, primals_93, primals_94, primals_96, primals_97,
primals_99, primals_100, primals_102, primals_104, primals_105,
primals_107, primals_108, primals_110, primals_111, primals_113,
primals_114, primals_116, primals_117, primals_119, primals_120,
primals_122, primals_123, primals_125, primals_126, primals_128,
primals_129, primals_131, primals_132, primals_134, primals_135,
primals_137, primals_138, primals_140, primals_141, primals_143,
primals_144, primals_146, primals_147, primals_149, primals_150,
primals_152, primals_154, primals_155, primals_157, primals_158,
primals_160, primals_161, primals_163, primals_164, primals_166,
primals_167, primals_169, primals_170, primals_172, primals_173,
primals_175, primals_176, primals_178, primals_179, primals_181,
primals_182, primals_184, primals_185, primals_187, primals_188,
primals_190, primals_191, primals_193, primals_194, primals_196,
primals_197, primals_199, primals_200, buf1, buf2, buf4, buf5, buf7,
buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22,
buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35,
buf37, buf38, buf40, buf41, buf43, buf44, buf46, buf47, buf49,
buf51, buf52, buf54, buf55, buf57, buf58, buf60, buf61, buf63,
buf64, buf66, buf67, buf69, buf70, buf72, buf73, buf75, buf76,
buf78, buf79, buf81, buf82, buf84, buf85, buf87, buf88, buf90,
buf91, buf93, buf94, buf96, buf97, buf99, buf101, buf102, buf104,
buf105, buf107, buf108, buf110, buf111, buf113, buf114, buf116,
buf117, buf119, buf120, buf122, buf123, buf125, buf126, buf128,
buf129, buf131, buf132, buf134, buf135, buf137, buf138, buf140,
buf141, buf143, buf144, buf146, buf147, buf149, buf151, buf152,
buf154, buf155, buf157, buf158, buf160, buf161, buf163, buf164,
buf166, buf167, buf169, buf170, buf172, buf173, buf175, buf176,
buf178, buf179, buf181, buf182, buf184, buf185, buf187, buf188,
buf190, buf191, buf193, buf194, buf196, buf197)
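# Note (added for clarity, not part of the generated output): the tuple above
# returns the network output (buf199) first, followed by the weights, PReLU
# parameters, and intermediate activation buffers that inductor saves for the
# backward pass.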
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super(concatLayer, self).__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
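# Illustrative usage sketch (shapes assumed, not from the source): concatLayer
# chains three conv3x3+PReLU sub-blocks and concatenates their outputs along
# the channel axis, so the output has 3 * out_channels_perSub channels.
#
#   layer = concatLayer(in_channels=4, out_channels_perSub=4, i=1, j=0, appendix='L2')
#   y = layer(torch.rand(2, 4, 8, 8))  # y.shape == torch.Size([2, 12, 8, 8])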
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super(stage, self).__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0
)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
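# Channel flow through one stage (worked from the constructor above; the
# concrete sizes are illustrative): five concatLayers each emit
# 3 * out_channels_perSub channels, a 1x1 conv + PReLU reduces them to
# mid_channels, and a final 1x1 conv maps mid_channels to out_channels.
#
#   s = stage(0, in_channels=4, out_channels_perSub=4, mid_channels=8,
#             out_channels=4, appendix='L2')
#   out = s(torch.rand(2, 4, 8, 8))  # out.shape == torch.Size([2, 4, 8, 8])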
class L2PartNew(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super(L2PartNew, self).__init__()
self.firstStage = stage(0, in_channels, 96, in_channels * 2,
stage_out_channels, 'L2')
self.secondStage = stage(1, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.thirdStage = stage(2, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.fourthStage = stage(3, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
def forward(self, input_0):
primals_1 = (self.firstStage.firstConcat.firstSub.
Mconv1_stage0_L2_0.weight)
primals_2 = (self.firstStage.firstConcat.firstSub.
Mconv1_stage0_L2_0.bias)
primals_4 = (self.firstStage.firstConcat.firstSub.
Mprelu1_stage0_L2_0.weight)
primals_5 = (self.firstStage.firstConcat.secondSub.
Mconv1_stage0_L2_1.weight)
primals_6 = (self.firstStage.firstConcat.secondSub.
Mconv1_stage0_L2_1.bias)
primals_7 = (self.firstStage.firstConcat.secondSub.
Mprelu1_stage0_L2_1.weight)
primals_8 = (self.firstStage.firstConcat.thirdSub.
Mconv1_stage0_L2_2.weight)
primals_9 = (self.firstStage.firstConcat.thirdSub.
Mconv1_stage0_L2_2.bias)
primals_10 = (self.firstStage.firstConcat.thirdSub.
Mprelu1_stage0_L2_2.weight)
primals_11 = (self.firstStage.secondConcat.firstSub.
Mconv2_stage0_L2_0.weight)
primals_12 = (self.firstStage.secondConcat.firstSub.
Mconv2_stage0_L2_0.bias)
primals_13 = (self.firstStage.secondConcat.firstSub.
Mprelu2_stage0_L2_0.weight)
primals_14 = (self.firstStage.secondConcat.secondSub.
Mconv2_stage0_L2_1.weight)
primals_15 = (self.firstStage.secondConcat.secondSub.
Mconv2_stage0_L2_1.bias)
primals_16 = (self.firstStage.secondConcat.secondSub.
Mprelu2_stage0_L2_1.weight)
primals_17 = (self.firstStage.secondConcat.thirdSub.
Mconv2_stage0_L2_2.weight)
primals_18 = (self.firstStage.secondConcat.thirdSub.
Mconv2_stage0_L2_2.bias)
primals_19 = (self.firstStage.secondConcat.thirdSub.
Mprelu2_stage0_L2_2.weight)
primals_20 = (self.firstStage.thirdConcat.firstSub.
Mconv3_stage0_L2_0.weight)
primals_21 = (self.firstStage.thirdConcat.firstSub.
Mconv3_stage0_L2_0.bias)
primals_22 = (self.firstStage.thirdConcat.firstSub.
Mprelu3_stage0_L2_0.weight)
primals_23 = (self.firstStage.thirdConcat.secondSub.
Mconv3_stage0_L2_1.weight)
primals_24 = (self.firstStage.thirdConcat.secondSub.
Mconv3_stage0_L2_1.bias)
primals_25 = (self.firstStage.thirdConcat.secondSub.
Mprelu3_stage0_L2_1.weight)
primals_26 = (self.firstStage.thirdConcat.thirdSub.
Mconv3_stage0_L2_2.weight)
primals_27 = (self.firstStage.thirdConcat.thirdSub.
Mconv3_stage0_L2_2.bias)
primals_28 = (self.firstStage.thirdConcat.thirdSub.
Mprelu3_stage0_L2_2.weight)
primals_29 = (self.firstStage.fourthConcat.firstSub.
Mconv4_stage0_L2_0.weight)
primals_30 = (self.firstStage.fourthConcat.firstSub.
Mconv4_stage0_L2_0.bias)
primals_31 = (self.firstStage.fourthConcat.firstSub.
Mprelu4_stage0_L2_0.weight)
primals_32 = (self.firstStage.fourthConcat.secondSub.
Mconv4_stage0_L2_1.weight)
primals_33 = (self.firstStage.fourthConcat.secondSub.
Mconv4_stage0_L2_1.bias)
primals_34 = (self.firstStage.fourthConcat.secondSub.
Mprelu4_stage0_L2_1.weight)
primals_35 = (self.firstStage.fourthConcat.thirdSub.
Mconv4_stage0_L2_2.weight)
primals_36 = (self.firstStage.fourthConcat.thirdSub.
Mconv4_stage0_L2_2.bias)
primals_37 = (self.firstStage.fourthConcat.thirdSub.
Mprelu4_stage0_L2_2.weight)
primals_38 = (self.firstStage.fifthConcat.firstSub.
Mconv5_stage0_L2_0.weight)
primals_39 = (self.firstStage.fifthConcat.firstSub.
Mconv5_stage0_L2_0.bias)
primals_40 = (self.firstStage.fifthConcat.firstSub.
Mprelu5_stage0_L2_0.weight)
primals_41 = (self.firstStage.fifthConcat.secondSub.
Mconv5_stage0_L2_1.weight)
primals_42 = (self.firstStage.fifthConcat.secondSub.
Mconv5_stage0_L2_1.bias)
primals_43 = (self.firstStage.fifthConcat.secondSub.
Mprelu5_stage0_L2_1.weight)
primals_44 = (self.firstStage.fifthConcat.thirdSub.
Mconv5_stage0_L2_2.weight)
primals_45 = (self.firstStage.fifthConcat.thirdSub.
Mconv5_stage0_L2_2.bias)
primals_46 = (self.firstStage.fifthConcat.thirdSub.
Mprelu5_stage0_L2_2.weight)
primals_47 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L2.weight
primals_48 = self.firstStage.afterConcatsFirst.Mconv6_stage0_L2.bias
primals_49 = self.firstStage.afterConcatsFirst.Mprelu6_stage0_L2.weight
primals_50 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L2.weight
primals_51 = self.firstStage.afterConcatsSecond.Mconv7_stage0_L2.bias
primals_52 = (self.secondStage.firstConcat.firstSub.
Mconv1_stage1_L2_0.weight)
primals_53 = (self.secondStage.firstConcat.firstSub.
Mconv1_stage1_L2_0.bias)
primals_54 = (self.secondStage.firstConcat.firstSub.
Mprelu1_stage1_L2_0.weight)
primals_55 = (self.secondStage.firstConcat.secondSub.
Mconv1_stage1_L2_1.weight)
primals_56 = (self.secondStage.firstConcat.secondSub.
Mconv1_stage1_L2_1.bias)
primals_57 = (self.secondStage.firstConcat.secondSub.
Mprelu1_stage1_L2_1.weight)
primals_58 = (self.secondStage.firstConcat.thirdSub.
Mconv1_stage1_L2_2.weight)
primals_59 = (self.secondStage.firstConcat.thirdSub.
Mconv1_stage1_L2_2.bias)
primals_60 = (self.secondStage.firstConcat.thirdSub.
Mprelu1_stage1_L2_2.weight)
primals_61 = (self.secondStage.secondConcat.firstSub.
Mconv2_stage1_L2_0.weight)
primals_62 = (self.secondStage.secondConcat.firstSub.
Mconv2_stage1_L2_0.bias)
primals_63 = (self.secondStage.secondConcat.firstSub.
Mprelu2_stage1_L2_0.weight)
primals_64 = (self.secondStage.secondConcat.secondSub.
Mconv2_stage1_L2_1.weight)
primals_65 = (self.secondStage.secondConcat.secondSub.
Mconv2_stage1_L2_1.bias)
primals_66 = (self.secondStage.secondConcat.secondSub.
Mprelu2_stage1_L2_1.weight)
primals_67 = (self.secondStage.secondConcat.thirdSub.
Mconv2_stage1_L2_2.weight)
primals_68 = (self.secondStage.secondConcat.thirdSub.
Mconv2_stage1_L2_2.bias)
primals_69 = (self.secondStage.secondConcat.thirdSub.
Mprelu2_stage1_L2_2.weight)
primals_70 = (self.secondStage.thirdConcat.firstSub.
Mconv3_stage1_L2_0.weight)
primals_71 = (self.secondStage.thirdConcat.firstSub.
Mconv3_stage1_L2_0.bias)
primals_72 = (self.secondStage.thirdConcat.firstSub.
Mprelu3_stage1_L2_0.weight)
primals_73 = (self.secondStage.thirdConcat.secondSub.
Mconv3_stage1_L2_1.weight)
primals_74 = (self.secondStage.thirdConcat.secondSub.
Mconv3_stage1_L2_1.bias)
primals_75 = (self.secondStage.thirdConcat.secondSub.
Mprelu3_stage1_L2_1.weight)
primals_76 = (self.secondStage.thirdConcat.thirdSub.
Mconv3_stage1_L2_2.weight)
primals_77 = (self.secondStage.thirdConcat.thirdSub.
Mconv3_stage1_L2_2.bias)
primals_78 = (self.secondStage.thirdConcat.thirdSub.
Mprelu3_stage1_L2_2.weight)
primals_79 = (self.secondStage.fourthConcat.firstSub.
Mconv4_stage1_L2_0.weight)
primals_80 = (self.secondStage.fourthConcat.firstSub.
Mconv4_stage1_L2_0.bias)
primals_81 = (self.secondStage.fourthConcat.firstSub.
Mprelu4_stage1_L2_0.weight)
primals_82 = (self.secondStage.fourthConcat.secondSub.
Mconv4_stage1_L2_1.weight)
primals_83 = (self.secondStage.fourthConcat.secondSub.
Mconv4_stage1_L2_1.bias)
primals_84 = (self.secondStage.fourthConcat.secondSub.
Mprelu4_stage1_L2_1.weight)
primals_85 = (self.secondStage.fourthConcat.thirdSub.
Mconv4_stage1_L2_2.weight)
primals_86 = (self.secondStage.fourthConcat.thirdSub.
Mconv4_stage1_L2_2.bias)
primals_87 = (self.secondStage.fourthConcat.thirdSub.
Mprelu4_stage1_L2_2.weight)
primals_88 = (self.secondStage.fifthConcat.firstSub.
Mconv5_stage1_L2_0.weight)
primals_89 = (self.secondStage.fifthConcat.firstSub.
Mconv5_stage1_L2_0.bias)
primals_90 = (self.secondStage.fifthConcat.firstSub.
Mprelu5_stage1_L2_0.weight)
primals_91 = (self.secondStage.fifthConcat.secondSub.
Mconv5_stage1_L2_1.weight)
primals_92 = (self.secondStage.fifthConcat.secondSub.
Mconv5_stage1_L2_1.bias)
primals_93 = (self.secondStage.fifthConcat.secondSub.
Mprelu5_stage1_L2_1.weight)
primals_94 = (self.secondStage.fifthConcat.thirdSub.
Mconv5_stage1_L2_2.weight)
primals_95 = (self.secondStage.fifthConcat.thirdSub.
Mconv5_stage1_L2_2.bias)
primals_96 = (self.secondStage.fifthConcat.thirdSub.
Mprelu5_stage1_L2_2.weight)
primals_97 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L2.weight
primals_98 = self.secondStage.afterConcatsFirst.Mconv6_stage1_L2.bias
primals_99 = (self.secondStage.afterConcatsFirst.Mprelu6_stage1_L2.
weight)
primals_100 = (self.secondStage.afterConcatsSecond.Mconv7_stage1_L2
.weight)
primals_101 = self.secondStage.afterConcatsSecond.Mconv7_stage1_L2.bias
primals_102 = (self.thirdStage.firstConcat.firstSub.
Mconv1_stage2_L2_0.weight)
primals_103 = (self.thirdStage.firstConcat.firstSub.
Mconv1_stage2_L2_0.bias)
primals_104 = (self.thirdStage.firstConcat.firstSub.
Mprelu1_stage2_L2_0.weight)
primals_105 = (self.thirdStage.firstConcat.secondSub.
Mconv1_stage2_L2_1.weight)
primals_106 = (self.thirdStage.firstConcat.secondSub.
Mconv1_stage2_L2_1.bias)
primals_107 = (self.thirdStage.firstConcat.secondSub.
Mprelu1_stage2_L2_1.weight)
primals_108 = (self.thirdStage.firstConcat.thirdSub.
Mconv1_stage2_L2_2.weight)
primals_109 = (self.thirdStage.firstConcat.thirdSub.
Mconv1_stage2_L2_2.bias)
primals_110 = (self.thirdStage.firstConcat.thirdSub.
Mprelu1_stage2_L2_2.weight)
primals_111 = (self.thirdStage.secondConcat.firstSub.
Mconv2_stage2_L2_0.weight)
primals_112 = (self.thirdStage.secondConcat.firstSub.
Mconv2_stage2_L2_0.bias)
primals_113 = (self.thirdStage.secondConcat.firstSub.
Mprelu2_stage2_L2_0.weight)
primals_114 = (self.thirdStage.secondConcat.secondSub.
Mconv2_stage2_L2_1.weight)
primals_115 = (self.thirdStage.secondConcat.secondSub.
Mconv2_stage2_L2_1.bias)
primals_116 = (self.thirdStage.secondConcat.secondSub.
Mprelu2_stage2_L2_1.weight)
primals_117 = (self.thirdStage.secondConcat.thirdSub.
Mconv2_stage2_L2_2.weight)
primals_118 = (self.thirdStage.secondConcat.thirdSub.
Mconv2_stage2_L2_2.bias)
primals_119 = (self.thirdStage.secondConcat.thirdSub.
Mprelu2_stage2_L2_2.weight)
primals_120 = (self.thirdStage.thirdConcat.firstSub.
Mconv3_stage2_L2_0.weight)
primals_121 = (self.thirdStage.thirdConcat.firstSub.
Mconv3_stage2_L2_0.bias)
primals_122 = (self.thirdStage.thirdConcat.firstSub.
Mprelu3_stage2_L2_0.weight)
primals_123 = (self.thirdStage.thirdConcat.secondSub.
Mconv3_stage2_L2_1.weight)
primals_124 = (self.thirdStage.thirdConcat.secondSub.
Mconv3_stage2_L2_1.bias)
primals_125 = (self.thirdStage.thirdConcat.secondSub.
Mprelu3_stage2_L2_1.weight)
primals_126 = (self.thirdStage.thirdConcat.thirdSub.
Mconv3_stage2_L2_2.weight)
primals_127 = (self.thirdStage.thirdConcat.thirdSub.
Mconv3_stage2_L2_2.bias)
primals_128 = (self.thirdStage.thirdConcat.thirdSub.
Mprelu3_stage2_L2_2.weight)
primals_129 = (self.thirdStage.fourthConcat.firstSub.
Mconv4_stage2_L2_0.weight)
primals_130 = (self.thirdStage.fourthConcat.firstSub.
Mconv4_stage2_L2_0.bias)
primals_131 = (self.thirdStage.fourthConcat.firstSub.
Mprelu4_stage2_L2_0.weight)
primals_132 = (self.thirdStage.fourthConcat.secondSub.
Mconv4_stage2_L2_1.weight)
primals_133 = (self.thirdStage.fourthConcat.secondSub.
Mconv4_stage2_L2_1.bias)
primals_134 = (self.thirdStage.fourthConcat.secondSub.
Mprelu4_stage2_L2_1.weight)
primals_135 = (self.thirdStage.fourthConcat.thirdSub.
Mconv4_stage2_L2_2.weight)
primals_136 = (self.thirdStage.fourthConcat.thirdSub.
Mconv4_stage2_L2_2.bias)
primals_137 = (self.thirdStage.fourthConcat.thirdSub.
Mprelu4_stage2_L2_2.weight)
primals_138 = (self.thirdStage.fifthConcat.firstSub.
Mconv5_stage2_L2_0.weight)
primals_139 = (self.thirdStage.fifthConcat.firstSub.
Mconv5_stage2_L2_0.bias)
primals_140 = (self.thirdStage.fifthConcat.firstSub.
Mprelu5_stage2_L2_0.weight)
primals_141 = (self.thirdStage.fifthConcat.secondSub.
Mconv5_stage2_L2_1.weight)
primals_142 = (self.thirdStage.fifthConcat.secondSub.
Mconv5_stage2_L2_1.bias)
primals_143 = (self.thirdStage.fifthConcat.secondSub.
Mprelu5_stage2_L2_1.weight)
primals_144 = (self.thirdStage.fifthConcat.thirdSub.
Mconv5_stage2_L2_2.weight)
primals_145 = (self.thirdStage.fifthConcat.thirdSub.
Mconv5_stage2_L2_2.bias)
primals_146 = (self.thirdStage.fifthConcat.thirdSub.
Mprelu5_stage2_L2_2.weight)
primals_147 = self.thirdStage.afterConcatsFirst.Mconv6_stage2_L2.weight
primals_148 = self.thirdStage.afterConcatsFirst.Mconv6_stage2_L2.bias
primals_149 = (self.thirdStage.afterConcatsFirst.Mprelu6_stage2_L2.
weight)
primals_150 = (self.thirdStage.afterConcatsSecond.Mconv7_stage2_L2.
weight)
primals_151 = self.thirdStage.afterConcatsSecond.Mconv7_stage2_L2.bias
primals_152 = (self.fourthStage.firstConcat.firstSub.
Mconv1_stage3_L2_0.weight)
primals_153 = (self.fourthStage.firstConcat.firstSub.
Mconv1_stage3_L2_0.bias)
primals_154 = (self.fourthStage.firstConcat.firstSub.
Mprelu1_stage3_L2_0.weight)
primals_155 = (self.fourthStage.firstConcat.secondSub.
Mconv1_stage3_L2_1.weight)
primals_156 = (self.fourthStage.firstConcat.secondSub.
Mconv1_stage3_L2_1.bias)
primals_157 = (self.fourthStage.firstConcat.secondSub.
Mprelu1_stage3_L2_1.weight)
primals_158 = (self.fourthStage.firstConcat.thirdSub.
Mconv1_stage3_L2_2.weight)
primals_159 = (self.fourthStage.firstConcat.thirdSub.
Mconv1_stage3_L2_2.bias)
primals_160 = (self.fourthStage.firstConcat.thirdSub.
Mprelu1_stage3_L2_2.weight)
primals_161 = (self.fourthStage.secondConcat.firstSub.
Mconv2_stage3_L2_0.weight)
primals_162 = (self.fourthStage.secondConcat.firstSub.
Mconv2_stage3_L2_0.bias)
primals_163 = (self.fourthStage.secondConcat.firstSub.
Mprelu2_stage3_L2_0.weight)
primals_164 = (self.fourthStage.secondConcat.secondSub.
Mconv2_stage3_L2_1.weight)
primals_165 = (self.fourthStage.secondConcat.secondSub.
Mconv2_stage3_L2_1.bias)
primals_166 = (self.fourthStage.secondConcat.secondSub.
Mprelu2_stage3_L2_1.weight)
primals_167 = (self.fourthStage.secondConcat.thirdSub.
Mconv2_stage3_L2_2.weight)
primals_168 = (self.fourthStage.secondConcat.thirdSub.
Mconv2_stage3_L2_2.bias)
primals_169 = (self.fourthStage.secondConcat.thirdSub.
Mprelu2_stage3_L2_2.weight)
primals_170 = (self.fourthStage.thirdConcat.firstSub.
Mconv3_stage3_L2_0.weight)
primals_171 = (self.fourthStage.thirdConcat.firstSub.
Mconv3_stage3_L2_0.bias)
primals_172 = (self.fourthStage.thirdConcat.firstSub.
Mprelu3_stage3_L2_0.weight)
primals_173 = (self.fourthStage.thirdConcat.secondSub.
Mconv3_stage3_L2_1.weight)
primals_174 = (self.fourthStage.thirdConcat.secondSub.
Mconv3_stage3_L2_1.bias)
primals_175 = (self.fourthStage.thirdConcat.secondSub.
Mprelu3_stage3_L2_1.weight)
primals_176 = (self.fourthStage.thirdConcat.thirdSub.
Mconv3_stage3_L2_2.weight)
primals_177 = (self.fourthStage.thirdConcat.thirdSub.
Mconv3_stage3_L2_2.bias)
primals_178 = (self.fourthStage.thirdConcat.thirdSub.
Mprelu3_stage3_L2_2.weight)
primals_179 = (self.fourthStage.fourthConcat.firstSub.
Mconv4_stage3_L2_0.weight)
primals_180 = (self.fourthStage.fourthConcat.firstSub.
Mconv4_stage3_L2_0.bias)
primals_181 = (self.fourthStage.fourthConcat.firstSub.
Mprelu4_stage3_L2_0.weight)
primals_182 = (self.fourthStage.fourthConcat.secondSub.
Mconv4_stage3_L2_1.weight)
primals_183 = (self.fourthStage.fourthConcat.secondSub.
Mconv4_stage3_L2_1.bias)
primals_184 = (self.fourthStage.fourthConcat.secondSub.
Mprelu4_stage3_L2_1.weight)
primals_185 = (self.fourthStage.fourthConcat.thirdSub.
Mconv4_stage3_L2_2.weight)
primals_186 = (self.fourthStage.fourthConcat.thirdSub.
Mconv4_stage3_L2_2.bias)
primals_187 = (self.fourthStage.fourthConcat.thirdSub.
Mprelu4_stage3_L2_2.weight)
primals_188 = (self.fourthStage.fifthConcat.firstSub.
Mconv5_stage3_L2_0.weight)
primals_189 = (self.fourthStage.fifthConcat.firstSub.
Mconv5_stage3_L2_0.bias)
primals_190 = (self.fourthStage.fifthConcat.firstSub.
Mprelu5_stage3_L2_0.weight)
primals_191 = (self.fourthStage.fifthConcat.secondSub.
Mconv5_stage3_L2_1.weight)
primals_192 = (self.fourthStage.fifthConcat.secondSub.
Mconv5_stage3_L2_1.bias)
primals_193 = (self.fourthStage.fifthConcat.secondSub.
Mprelu5_stage3_L2_1.weight)
primals_194 = (self.fourthStage.fifthConcat.thirdSub.
Mconv5_stage3_L2_2.weight)
primals_195 = (self.fourthStage.fifthConcat.thirdSub.
Mconv5_stage3_L2_2.bias)
primals_196 = (self.fourthStage.fifthConcat.thirdSub.
Mprelu5_stage3_L2_2.weight)
primals_197 = (self.fourthStage.afterConcatsFirst.Mconv6_stage3_L2.
weight)
primals_198 = self.fourthStage.afterConcatsFirst.Mconv6_stage3_L2.bias
primals_199 = (self.fourthStage.afterConcatsFirst.Mprelu6_stage3_L2
.weight)
primals_200 = (self.fourthStage.afterConcatsSecond.Mconv7_stage3_L2
.weight)
primals_201 = self.fourthStage.afterConcatsSecond.Mconv7_stage3_L2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185, primals_186, primals_187, primals_188, primals_189,
primals_190, primals_191, primals_192, primals_193, primals_194,
primals_195, primals_196, primals_197, primals_198, primals_199,
primals_200, primals_201])
return output[0]
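# Note (added for clarity): forward() gathers every registered weight, bias,
# and PReLU parameter into the positional list expected by the compiled call()
# above; output[0] is buf199, the final Mconv7_stage3_L2 feature map.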
| EddieMG/LateTemporalModeling3DCNN | L2Part | false | 2,363 | [
"MIT"
] | 0 | 94c87dc1d31d09bc310d0e735a2e55453976cb0d | https://github.com/EddieMG/LateTemporalModeling3DCNN/tree/94c87dc1d31d09bc310d0e735a2e55453976cb0d | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from itertools import chain as chain
from collections import OrderedDict
import torch.hub
class concatLayer(nn.Module):
def __init__(self, in_channels, out_channels_perSub, i, j, appendix):
super().__init__()
self.firstSub = self.concatLayerSub(in_channels,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_0')
self.secondSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_1')
self.thirdSub = self.concatLayerSub(out_channels_perSub,
out_channels_perSub, '%d_stage%d_' % (i, j) + appendix + '_2')
def forward(self, x):
firstSub = self.firstSub(x)
secondSub = self.secondSub(firstSub)
thirdSub = self.thirdSub(secondSub)
out = torch.cat([firstSub, secondSub, thirdSub], 1)
return out
def concatLayerSub(self, in_channels, out_channels, layerName):
concatLayerSubOrdered = OrderedDict()
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
concatLayerSubOrdered.update({('Mconv' + layerName): conv2d})
concatLayerSubOrdered.update({('Mprelu' + layerName): nn.PReLU(
out_channels)})
return nn.Sequential(concatLayerSubOrdered)
class stage(nn.Module):
def __init__(self, stageID, in_channels, out_channels_perSub,
mid_channels, out_channels, appendix):
super().__init__()
self.firstConcat = concatLayer(in_channels, out_channels_perSub, 1,
stageID, appendix)
self.secondConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 2, stageID, appendix)
self.thirdConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 3, stageID, appendix)
self.fourthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 4, stageID, appendix)
self.fifthConcat = concatLayer(3 * out_channels_perSub,
out_channels_perSub, 5, stageID, appendix)
conv2d = nn.Conv2d(3 * out_channels_perSub, mid_channels,
kernel_size=1, padding=0)
prelu = nn.PReLU(mid_channels)
self.afterConcatsFirst = nn.Sequential(OrderedDict({(
'Mconv6_stage%d_%s' % (stageID, appendix)): conv2d, (
'Mprelu6_stage%d_%s' % (stageID, appendix)): prelu}))
conv2d = nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0
)
self.afterConcatsSecond = nn.Sequential(OrderedDict({(
'Mconv7_stage%d_%s' % (stageID, appendix)): conv2d}))
def forward(self, x):
x = self.firstConcat(x)
x = self.secondConcat(x)
x = self.thirdConcat(x)
x = self.fourthConcat(x)
x = self.fifthConcat(x)
x = self.afterConcatsFirst(x)
out = self.afterConcatsSecond(x)
return out
class Model(nn.Module):
def __init__(self, in_channels, stage_out_channels):
super().__init__()
self.firstStage = stage(0, in_channels, 96, in_channels * 2,
stage_out_channels, 'L2')
self.secondStage = stage(1, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.thirdStage = stage(2, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
self.fourthStage = stage(3, in_channels + stage_out_channels,
in_channels, in_channels * 4, stage_out_channels, 'L2')
def forward(self, features):
x = self.firstStage(features)
x = torch.cat([features, x], 1)
x = self.secondStage(x)
x = torch.cat([features, x], 1)
x = self.thirdStage(x)
x = torch.cat([features, x], 1)
out = self.fourthStage(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_i
# ... truncated (>4000 chars) for memory efficiency |
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
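# Eager-mode equivalent of the kernel above (illustrative):
#   buf0 = torch.relu(primals_1)  # elementwise max(0, x) over all 256 values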
# kernel path: runs/run_shard_7/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => relu_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
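# Eager-mode equivalent (illustrative): the convolution itself runs through
# extern_kernels.convolution with bias=None, so this kernel only fuses the
# per-channel bias add and the ReLU that follow it:
#   buf2 = torch.relu(buf1 + primals_3.view(1, -1, 1, 1))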
# kernel path: runs/run_shard_7/inductor_cache/li/clisyfh7uy7myv7uicl6ym42hf2x575nogmdoxa7aohhuh54uign.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_4 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %relu), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %relu), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
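# Eager-mode equivalent (illustrative): fuses the second bias add, the
# residual add, and the write-back of relu(x) into the input tensor that
# mirrors nn.ReLU(inplace=True) in the source module:
#   buf4 = (buf3 + primals_5.view(1, -1, 1, 1)) + buf0
#   primals_1.copy_(buf0)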
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf4, primals_5, buf0, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf4, primals_2, primals_4, buf0, buf2, )
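# Mapping back to BasicBlock.forward (added for clarity): buf0 = relu(x),
# buf2 = relu(conv1(buf0)), buf4 = conv2(buf2) + buf0; the remaining returned
# tensors are kept for the backward pass.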
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules: Xavier-uniform conv weights, unit norm weights, zero biases
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with TensorFlow-style padding behavior ('SAME' by default, 'VALID' supported)
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
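# Worked example of the 'SAME' padding arithmetic above (values assumed):
# with input_size=4, stride=1, kernel=3, dilation=1,
#   effective_filter_size = (3 - 1) * 1 + 1 = 3
#   out_size = (4 + 1 - 1) // 1 = 4
#   total_padding = max(0, (4 - 1) * 1 + 3 - 4) = 2
#   additional_padding = int(2 % 2 != 0) = 0
# so no asymmetric pre-pad is applied and conv2d runs with padding=(1, 1).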
class BasicBlock(nn.Module):
"""
Residual network block (pre-activation: ReLU -> conv -> ReLU -> conv, plus skip)
"""
def __init__(self, n_channels, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, x):
identity = x
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
out += identity
return out
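# Usage sketch (assumed, not from the source). Because nn.ReLU(inplace=True)
# mutates x, `identity` ends up equal to relu(x) and the residual therefore
# adds relu(x), matching the copy_ node in the compiled graph above.
#   block = BasicBlock(n_channels=4)
#   y = block(torch.rand(4, 4, 4, 4))  # y.shape == torch.Size([4, 4, 4, 4])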
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5, buf0,
primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, primals_2, primals_4, buf0, buf2
def apply_init_(modules):
"""
Initialize NN modules: Xavier-uniform conv weights, unit norm weights, zero biases
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with TensorFlow-style padding behavior ('SAME' by default, 'VALID' supported)
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class BasicBlockNew(nn.Module):
"""
Residual network block (inductor-compiled forward)
"""
def __init__(self, n_channels, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
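# Note (added for clarity): BasicBlockNew holds the same parameters as
# BasicBlock but routes forward() through the compiled call(), returning
# output[0] (buf4, the residual sum) and discarding the saved tensors.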
| IanYHWu/msc_2021 | BasicBlock | false | 2,364 | [
"MIT"
] | 0 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | https://github.com/IanYHWu/msc_2021/tree/0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules: Xavier-uniform conv weights, unit norm weights, zero biases
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with TensorFlow-style padding behavior ('SAME' by default, 'VALID' supported)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class Model(nn.Module):
"""
Residual network block (pre-activation: ReLU -> conv -> ReLU -> conv, plus skip)
"""
def __init__(self, n_channels, stride=1):
super().__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, x):
identity = x
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
out += identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
InputInjection | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nw/cnwstmvf4avgqqw5lh4fg5fqhyxv6b637lj7cpurr4it7ajwhzi5.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x3 = (xindex // 2)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x3)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x3)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x3)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x3)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x3)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x3)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x3)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp49 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + ((-2)*x0) + ((-2)*x1) + (((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x0*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x1*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) + (4*x0*x1) + ((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5))) + ((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + (x4), tmp53, xmask)
''', device_str='cuda')
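# Note on the divisor: tmp52 above is inductor's closed form for the 3x3
# averaging window clipped to the padded input extent. For this 4x4 input
# with padding=1 every window stays inside the padded region, so tmp52
# works out to 9 at all four output positions, matching nn.AvgPool2d's
# default count_include_pad=True behavior.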
# kernel path: runs/run_shard_7/inductor_cache/a2/ca2ipk6vwg5ykf7uixiwiry7t2tymmzgrfywc7msbxu7kq6ovbsd.py
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_1 => avg_pool2d_1
# x_2 => avg_pool2d_2
# x_3 => avg_pool2d_3
# Graph fragment:
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d, [3, 3], [2, 2], [1, 1]), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d_1, [3, 3], [2, 2], [1, 1]), kwargs = {})
# %avg_pool2d_3 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d_2, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + ((-3) + (4*x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + ((-2) + (4*x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp12 + tmp7
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp13
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 + tmp20
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + (4*x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp25 + tmp23
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + (4*x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp28 + tmp26
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp29
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + (4*x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + (4*x0)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = tl.full([1], 9, tl.int32)
tmp40 = tmp38 / tmp39
tmp41 = tmp0 < tmp14
tmp42 = tmp2 & tmp41
tmp43 = tmp42 & tmp42
tmp44 = tmp1 < tmp14
tmp45 = tmp8 & tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 + tmp40
tmp48 = tmp14 < tmp14
tmp49 = tmp15 & tmp48
tmp50 = tmp42 & tmp49
tmp51 = tmp40 + tmp47
tmp52 = tmp45 & tmp42
tmp53 = tmp40 + tmp51
tmp54 = tmp45 & tmp45
tmp55 = tmp40 + tmp53
tmp56 = tmp45 & tmp49
tmp57 = tmp40 + tmp55
tmp58 = tmp49 & tmp42
tmp59 = tmp40 + tmp57
tmp60 = tmp49 & tmp45
tmp61 = tmp40 + tmp59
tmp62 = tmp49 & tmp49
tmp63 = tmp40 + tmp61
tmp64 = tmp63 / tmp39
tmp65 = tmp64 + tmp64
tmp66 = tmp64 + tmp65
tmp67 = tmp64 + tmp66
tmp68 = tmp64 + tmp67
tmp69 = tmp64 + tmp68
tmp70 = tmp64 + tmp69
tmp71 = tmp64 + tmp70
tmp72 = tmp64 + tmp71
tmp73 = tmp72 / tmp39
tl.store(in_out_ptr0 + (x0), tmp73, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = buf1; del buf1 # reuse
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf3, buf0, 16, grid=grid(16), stream=stream0)
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch._C
import torch.serialization
class InputInjection(nn.Module):
"""Downsampling module for CGNet."""
def __init__(self, num_downsampling):
super(InputInjection, self).__init__()
self.pool = nn.ModuleList()
for i in range(num_downsampling):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
for pool in self.pool:
x = pool(x)
return x
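# A functionally equivalent sketch of the loop above (illustrative only):
# each stage is F.avg_pool2d(x, kernel_size=3, stride=2, padding=1), so the
# spatial size follows H_out = (H + 2 - 3) // 2 + 1, i.e. 4 -> 2 -> 1 -> 1
# -> 1 for num_downsampling=4, matching the (4, 4, 1, 1) output produced by
# the compiled kernels.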
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_downsampling': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x3), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x3), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x3), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x3), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp49 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 *
x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 +
2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)
)
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x4, tmp53, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x0), tmp11 & xmask, eviction_policy
='evict_last', other=0.0)
tmp13 = tmp12 + tmp7
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp18 & xmask, eviction_policy
='evict_last', other=0.0)
tmp20 = tmp19 + tmp13
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp21 & xmask, eviction_policy
='evict_last', other=0.0)
tmp23 = tmp22 + tmp20
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp25 + tmp23
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp28 + tmp26
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp31 + tmp29
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), tmp36 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = tl.full([1], 9, tl.int32)
tmp40 = tmp38 / tmp39
    # Dead mask expressions left over from the fully unrolled 2x2 -> 1x1
    # pooling stages were dropped here; only the running sums below feed
    # the remaining averages.
    tmp47 = tmp40 + tmp40
    tmp51 = tmp40 + tmp47
    tmp53 = tmp40 + tmp51
    tmp55 = tmp40 + tmp53
    tmp57 = tmp40 + tmp55
    tmp59 = tmp40 + tmp57
    tmp61 = tmp40 + tmp59
    tmp63 = tmp40 + tmp61
tmp64 = tmp63 / tmp39
tmp65 = tmp64 + tmp64
tmp66 = tmp64 + tmp65
tmp67 = tmp64 + tmp66
tmp68 = tmp64 + tmp67
tmp69 = tmp64 + tmp68
tmp70 = tmp64 + tmp69
tmp71 = tmp64 + tmp70
tmp72 = tmp64 + tmp71
tmp73 = tmp72 / tmp39
tl.store(in_out_ptr0 + x0, tmp73, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = buf1
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf2
triton_poi_fused_avg_pool2d_1[grid(16)](buf3, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf3,
class InputInjectionNew(nn.Module):
"""Downsampling module for CGNet."""
def __init__(self, num_downsampling):
super(InputInjectionNew, self).__init__()
self.pool = nn.ModuleList()
for i in range(num_downsampling):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
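def _example_usage():
    # Hypothetical usage sketch (_example_usage is illustrative, not from
    # the original repo); call() pins CUDA device 0 and asserts a
    # contiguous (4, 4, 4, 4) input, so only that shape works here.
    model = InputInjectionNew(num_downsampling=4)
    return model(torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 4, 1, 1)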
 | ImportPaddle/APCNet | InputInjection | false | 2365 | [
"MIT"
] | 0 | 68ade1f83827b4cdd60ee4b6ac25454397100316 | https://github.com/ImportPaddle/APCNet/tree/68ade1f83827b4cdd60ee4b6ac25454397100316 | import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
"""Downsampling module for CGNet."""
def __init__(self, num_downsampling):
super().__init__()
self.pool = nn.ModuleList()
for i in range(num_downsampling):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
for pool in self.pool:
x = pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadAttn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fq/cfqwyletxlxsztzvms4ugcujphw6sshjg2bm6qtmkz3kg7omhsdb.py
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*y1) + (128*x2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/q2/cq2bhrixv5xavijmo4bsnrvtws5kwvqhdummdlzij7fvpl7fcfb5.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => amax, clone_1, exp, sub
# Graph fragment:
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
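# This kernel and the one below implement inductor's numerically stable
# softmax split: softmax(x)_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j).
# Subtracting the row max first keeps exp() in a safe range; the 0.5
# constant folded into tmp2/tmp4/... is the attention scale
# 1 / sqrt(d_head) with d_head = 4.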
# kernel path: runs/run_shard_7/inductor_cache/kl/ckl6u3t54hef2b5wjd5dpgg7u4q64ygtqnuha2usouwtpaivlz52.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + ((4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7t/c7tefgs6fnwuoixspsoxpumnyhdslyt74yuzsspadja5iim4s57k.py
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_vec => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_14,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + (4*x2) + (32*x3) + (128*x1)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/r2/cr2qmbufonkcpvzj5nto7mm2yb255fxx7ca2wtbl5deiamneh6dk.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_21,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
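# The kernel above computes the per-row LayerNorm statistics of the fused
# residual h + attn_out by direct expansion: mean = sum(x) / 4 and the
# biased variance var = sum((x - mean)^2) / 4, matching the
# var_mean(correction=0) node in the graph fragment.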
# kernel path: runs/run_shard_7/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [head_q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, 16, grid=grid(16, 16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf1, buf6, 256, grid=grid(256), stream=stream0)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_6
return (buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, h, attn_mask=None, mems=None):
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None, :, :, None],
                    -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:, :, :, None],
                    -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0),
            attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = h + attn_out
else:
output = self.layer_norm(h + attn_out)
return output
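# A hedged reading of the einsum contractions above: with q of shape
# (i, b, n, d) and k of shape (j, b, n, d), 'ibnd,jbnd->ijbn' is a per-head
# dot-product score and 'ijbn,jbnd->ibnd' re-weights the values by the
# softmaxed scores -- the standard attention softmax(q @ k^T * scale) @ v,
# applied independently per batch b and head n.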
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * y1 + 128 * x2), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 32 * x3 + 128 * x1), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
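# This fused kernel finishes the post-norm residual path in one pass:
# y = (x + attn_out - mean) * rsqrt(var + 1e-05) * weight + bias, with
# mean and var read from the buffers filled by
# triton_poi_fused_add_native_layer_norm_5 above.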
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(16, 16)](buf4, buf5, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(256)](buf1, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
return buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16,
16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
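# In outline (illustrative summary of call() above): buf0/buf1 are the Q and
# fused K/V projections (cuBLAS GEMMs), the two bmm calls compute the
# attention scores and the weighted values, the softmax is split across
# kernels 1-2, and kernels 5-6 fuse the residual add with the LayerNorm.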
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, input_0):
primals_2 = self.q_net.weight
primals_3 = self.kv_net.weight
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| HikariNoMJ14/bebopnet-code | MultiHeadAttn | false | 2,366 | [
"MIT"
] | 0 | 9dfa800d3e24c53de5dc948b87a7db2bc2919b54 | https://github.com/HikariNoMJ14/bebopnet-code/tree/9dfa800d3e24c53de5dc948b87a7db2bc2919b54 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super().__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, h, attn_mask=None, mems=None):
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float
('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf')
)
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.
size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = h + attn_out
else:
output = self.layer_norm(h + attn_out)
return output
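# Shape walkthrough for the einsums above (illustrative):
#   head_q, head_k, head_v: (seq_len, batch, n_head, d_head)
#   attn_score ('ibnd,jbnd->ijbn'): (qlen, klen, batch, n_head)
#   attn_vec   ('ijbn,jbnd->ibnd'): (qlen, batch, n_head, d_head)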
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
Encoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yx/cyx5u6kg47bcb2a4mvrlkw5ynd42h4mj76hk2j6tveptehbkmic4.py
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul]
# Source node to ATen node mapping:
# pow_1 => pow_1
# scaled_l2_norm => mul
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sum_1), kwargs = {})
triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp0 * tmp19
tl.store(out_ptr0 + (x4), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# assignment_weights => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [2], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# assignment_weights => div, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/36/c36bunrpokrzs5svt4e6kwmfyyitmjh7nivu5rc6sidx55znumrf.py
# Topologically Sorted Source Nodes: [sub, mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# encoded_feat => sum_3
# mul_1 => mul_1
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %sub), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_per_fused_mul_sub_sum_3 = async_compile.triton('triton_per_fused_mul_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x0 = xindex % 4
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*r3) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tl.store(out_ptr0 + (x5), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0.run(primals_3, primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_per_fused_mul_sub_sum_3.run(buf2, primals_1, primals_2, buf3, 64, 16, grid=grid(64), stream=stream0)
del buf2
return (buf3, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(Encoding, self).__init__()
self.channels, self.num_codes = channels, num_codes
std = 1.0 / (num_codes * channels) ** 0.5
self.codewords = nn.Parameter(torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float)
.uniform_(-1, 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords
).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert x.dim() == 4 and x.size(1) == self.channels
batch_size = x.size(0)
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
assignment_weights = F.softmax(self.scaled_l2(x, self.codewords,
self.scale), dim=2)
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})')
return repr_str
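# In short (illustrative summary): with codewords c_k, scales s_k (initialized
# negative) and N = H*W flattened positions x_i, the layer computes assignment
# weights a_ik = softmax_k(s_k * ||x_i - c_k||^2) and aggregates the residuals
# e_k = sum_i a_ik * (x_i - c_k), one channels-dimensional vector per code word.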
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'num_codes': 4}]
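# Minimal smoke test (illustrative sketch; not part of the original file):
# the encoder maps (B, C, H, W) -> (B, num_codes, C).
if __name__ == '__main__':
    _args, _kwargs = get_init_inputs()
    _enc = Encoding(*_args, **_kwargs)
    _x, = get_inputs()
    assert _enc(_x).shape == (_x.size(0), _enc.num_codes, _enc.channels)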
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp0 * tmp19
tl.store(out_ptr0 + x4, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * r3 + 64 * x2), xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tl.store(out_ptr0 + x5, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_mul_sub_sum_3[grid(64)](buf2, primals_1, primals_2,
buf3, 64, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
return buf3, primals_1, primals_2, primals_3
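# Pipeline (descriptive summary): kernel 0 fuses the scaled L2 distance
# (sub/pow/sum/mul), kernels 1-2 form a numerically stable softmax over the
# code dimension, and kernel 3 reduces the weighted residuals over the 16
# spatial positions.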
class EncodingNew(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(EncodingNew, self).__init__()
self.channels, self.num_codes = channels, num_codes
std = 1.0 / (num_codes * channels) ** 0.5
self.codewords = nn.Parameter(torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float)
.uniform_(-1, 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords
).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
reshaped_codewords)).sum(dim=1)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})')
return repr_str
def forward(self, input_0):
primals_2 = self.codewords
primals_3 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ImportPaddle/APCNet | Encoding | false | 2,367 | [
"MIT"
] | 0 | 68ade1f83827b4cdd60ee4b6ac25454397100316 | https://github.com/ImportPaddle/APCNet/tree/68ade1f83827b4cdd60ee4b6ac25454397100316 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
class Model(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super().__init__()
self.channels, self.num_codes = channels, num_codes
std = 1.0 / (num_codes * channels) ** 0.5
self.codewords = nn.Parameter(torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float)
.uniform_(-1, 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords
).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert x.dim() == 4 and x.size(1) == self.channels
batch_size = x.size(0)
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
assignment_weights = F.softmax(self.scaled_l2(x, self.codewords,
self.scale), dim=2)
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})')
return repr_str
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7b/c7bsbu5nqjwno7oolhruidochm2rierdki7fkzahol2dvs7dgv5t.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/62/c622us6etqvroftqelxdgdedtcxkzuvbkchqjjtjl3nrhqvihz22.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add_2
# add_1 => add_3
# mul => mul_2
# mul_1 => mul_3
# mul_2 => mul_4
# mul_3 => mul_5
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_3), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
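    # 0.7978845608028654 == sqrt(2/pi), the constant in the tanh approximation of GELU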
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5q/c5qmnkuxezgezseizmolw3mx24fyy6xp3cfoz3egpqwcprxgwjre.py
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_2 => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
triton_poi_fused_add_mul_pow_tanh_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_7, primals_3, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.distributed
import torch.nn as nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
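# Note: math.sqrt(2 / math.pi) ~= 0.7978845608028654; this is the standard
# tanh approximation of GELU, which tracks the exact erf-based form to
# within about 1e-3 in absolute error.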
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
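# Minimal smoke test (illustrative sketch; not part of the original file):
# the block is residual, so the output shape must equal the input shape.
if __name__ == '__main__':
    _args, _kwargs = get_init_inputs()
    _ffn = PositionwiseFeedForward(*_args, **_kwargs)
    _ffn.eval()
    _x, = get_inputs()
    with torch.no_grad():
        assert _ffn(_x).shape == _x.shape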
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.distributed
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
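    # 0.7978845608028654 == sqrt(2/pi), the constant in the tanh approximation of GELU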
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_tanh_2[grid(256)](buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, primals_4
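# Pipeline (descriptive summary): kernels 0-1 fuse the LayerNorm statistics
# and affine transform, addmm/mm run the two linear layers through cuBLAS,
# kernel 2 applies the tanh-approximated GELU, and kernel 3 fuses the w_2
# bias add with the residual connection.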
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
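        # Map the module parameters onto the positional slots expected by
        # call(): primals_1/primals_2 are the LayerNorm weight/bias,
        # primals_4/primals_5 the w_1 weight/bias, primals_6/primals_7 the
        # w_2 weight/bias; primals_3 is the input tensor.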
        primals_4 = self.w_1.weight
        primals_5 = self.w_1.bias
        primals_6 = self.w_2.weight
        primals_7 = self.w_2.bias
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| GraphGrailAi/summ-abs-dev | PositionwiseFeedForward | false | 2,368 | [
"MIT"
] | 0 | 512f253bf72b6529589b29d06959b560b79f1cde | https://github.com/GraphGrailAi/summ-abs-dev/tree/512f253bf72b6529589b29d06959b560b79f1cde | import math
import torch
import torch.distributed
import torch.nn as nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Model(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FNN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pred => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pred => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
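# Together, the two pointwise kernels above implement a numerically stable
# row-wise softmax over dim=1 of the (4, 4) input: the first subtracts the
# row max before exponentiating, the second normalizes by the row sum --
# equivalent to torch.softmax(arg0_1, dim=1).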
# kernel path: runs/run_shard_7/inductor_cache/uq/cuqmt6h2h2qrwlfougoxbc4cnmi6o42afntw3nwkdzak2kpz2xum.py
# Topologically Sorted Source Nodes: [mul, ne, valid_mask, valid_mask_1, mul_1, sum_1, mul_2, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss, loss_1, total_loss, mul_3, valid_mask_2, mul_4, sum_3, mul_5, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_2, loss_3, total_loss_1, mul_6, valid_mask_3, mul_7, sum_5, mul_8, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_4, loss_5, total_loss_2, mul_9, valid_mask_4, mul_10, sum_7, mul_11, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_6, loss_7, total_loss_3, loss_8, loss_9, loss_10], Original ATen: [aten.mul, aten.ne, aten._to_copy, aten.view, aten.sum, aten.add, aten.pow, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_11 => add_13
# add_5 => add_5
# add_8 => add_9
# den => add_2
# den_1 => add_6
# den_2 => add_10
# den_3 => add_14
# loss => sub_1
# loss_1 => mean
# loss_10 => mul_12
# loss_2 => sub_2
# loss_3 => mean_1
# loss_4 => sub_3
# loss_5 => mean_2
# loss_6 => sub_4
# loss_7 => mean_3
# loss_8 => div_5
# loss_9 => mean_4
# mul => mul
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# ne => ne
# num => add
# num_1 => add_4
# num_2 => add_8
# num_3 => add_12
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# pow_7 => pow_7
# pow_8 => pow_8
# sum_1 => sum_2
# sum_2 => sum_3
# sum_3 => sum_4
# sum_4 => sum_5
# sum_5 => sum_6
# sum_6 => sum_7
# sum_7 => sum_8
# sum_8 => sum_9
# total_loss => add_3
# total_loss_1 => add_7
# total_loss_2 => add_11
# total_loss_3 => add_15
# truediv => div_1
# truediv_1 => div_2
# truediv_2 => div_3
# truediv_3 => div_4
# valid_mask => convert_element_type_2
# valid_mask_1 => view_2
# valid_mask_2 => view_5
# valid_mask_3 => view_8
# valid_mask_4 => view_11
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %select_1), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, 255), kwargs = {})
# %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int64), kwargs = {})
# %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, 1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %select_3), kwargs = {})
# %view_5 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_5), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_5, [1]), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mean_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %select_5), kwargs = {})
# %view_8 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %view_8), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [1]), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_6, 2), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_5, 2), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_9, [1]), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, 1), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_3), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mean_2), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %select_7), kwargs = {})
# %view_11 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %view_11), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [1]), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 2), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, 1), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_9, 2), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_7, 2), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_7, %pow_8), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_13, [1]), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 1), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_4,), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mean_3), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_5,), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_4, 1.0), kwargs = {})
triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2 = async_compile.triton('triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp153 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp1.to(tl.int64)
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 3, tl.int64)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp6 == tmp3
tmp8 = tmp7.to(tl.int64)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 * tmp9
tmp11 = 255.0
tmp12 = tmp1 != tmp11
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp10 * tmp14
tmp17 = tmp16.to(tl.int64)
tmp18 = triton_helpers.maximum(tmp17, tmp3)
tmp19 = triton_helpers.minimum(tmp18, tmp5)
tmp20 = tmp19 == tmp3
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp0 * tmp22
tmp24 = tmp16 != tmp11
tmp25 = tmp24.to(tl.int64)
tmp26 = tmp25.to(tl.float32)
tmp27 = tmp23 * tmp26
tmp28 = tmp15 + tmp27
tmp30 = tmp29.to(tl.int64)
tmp31 = triton_helpers.maximum(tmp30, tmp3)
tmp32 = triton_helpers.minimum(tmp31, tmp5)
tmp33 = tmp32 == tmp3
tmp34 = tmp33.to(tl.int64)
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp0 * tmp35
tmp37 = tmp29 != tmp11
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38.to(tl.float32)
tmp40 = tmp36 * tmp39
tmp41 = tmp28 + tmp40
tmp43 = tmp42.to(tl.int64)
tmp44 = triton_helpers.maximum(tmp43, tmp3)
tmp45 = triton_helpers.minimum(tmp44, tmp5)
tmp46 = tmp45 == tmp3
tmp47 = tmp46.to(tl.int64)
tmp48 = tmp47.to(tl.float32)
tmp49 = tmp0 * tmp48
tmp50 = tmp42 != tmp11
tmp51 = tmp50.to(tl.int64)
tmp52 = tmp51.to(tl.float32)
tmp53 = tmp49 * tmp52
tmp54 = tmp41 + tmp53
tmp55 = tmp0 * tmp0
tmp56 = tmp8 * tmp8
tmp57 = tmp56.to(tl.float32)
tmp58 = tmp55 + tmp57
tmp59 = tmp21 * tmp21
tmp60 = tmp59.to(tl.float32)
tmp61 = tmp55 + tmp60
tmp62 = tmp58 + tmp61
tmp63 = tmp34 * tmp34
tmp64 = tmp63.to(tl.float32)
tmp65 = tmp55 + tmp64
tmp66 = tmp62 + tmp65
tmp67 = tmp47 * tmp47
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp55 + tmp68
tmp70 = tmp66 + tmp69
tmp72 = tl.full([1, 1], 1, tl.int64)
tmp73 = tmp6 == tmp72
tmp74 = tmp73.to(tl.int64)
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp71 * tmp75
tmp77 = tmp76 * tmp14
tmp78 = tmp19 == tmp72
tmp79 = tmp78.to(tl.int64)
tmp80 = tmp79.to(tl.float32)
tmp81 = tmp71 * tmp80
tmp82 = tmp81 * tmp26
tmp83 = tmp77 + tmp82
tmp84 = tmp32 == tmp72
tmp85 = tmp84.to(tl.int64)
tmp86 = tmp85.to(tl.float32)
tmp87 = tmp71 * tmp86
tmp88 = tmp87 * tmp39
tmp89 = tmp83 + tmp88
tmp90 = tmp45 == tmp72
tmp91 = tmp90.to(tl.int64)
tmp92 = tmp91.to(tl.float32)
tmp93 = tmp71 * tmp92
tmp94 = tmp93 * tmp52
tmp95 = tmp89 + tmp94
tmp96 = tmp71 * tmp71
tmp97 = tmp74 * tmp74
tmp98 = tmp97.to(tl.float32)
tmp99 = tmp96 + tmp98
tmp100 = tmp79 * tmp79
tmp101 = tmp100.to(tl.float32)
tmp102 = tmp96 + tmp101
tmp103 = tmp99 + tmp102
tmp104 = tmp85 * tmp85
tmp105 = tmp104.to(tl.float32)
tmp106 = tmp96 + tmp105
tmp107 = tmp103 + tmp106
tmp108 = tmp91 * tmp91
tmp109 = tmp108.to(tl.float32)
tmp110 = tmp96 + tmp109
tmp111 = tmp107 + tmp110
tmp113 = tl.full([1, 1], 2, tl.int64)
tmp114 = tmp6 == tmp113
tmp115 = tmp114.to(tl.int64)
tmp116 = tmp115.to(tl.float32)
tmp117 = tmp112 * tmp116
tmp118 = tmp117 * tmp14
tmp119 = tmp19 == tmp113
tmp120 = tmp119.to(tl.int64)
tmp121 = tmp120.to(tl.float32)
tmp122 = tmp112 * tmp121
tmp123 = tmp122 * tmp26
tmp124 = tmp118 + tmp123
tmp125 = tmp32 == tmp113
tmp126 = tmp125.to(tl.int64)
tmp127 = tmp126.to(tl.float32)
tmp128 = tmp112 * tmp127
tmp129 = tmp128 * tmp39
tmp130 = tmp124 + tmp129
tmp131 = tmp45 == tmp113
tmp132 = tmp131.to(tl.int64)
tmp133 = tmp132.to(tl.float32)
tmp134 = tmp112 * tmp133
tmp135 = tmp134 * tmp52
tmp136 = tmp130 + tmp135
tmp137 = tmp112 * tmp112
tmp138 = tmp115 * tmp115
tmp139 = tmp138.to(tl.float32)
tmp140 = tmp137 + tmp139
tmp141 = tmp120 * tmp120
tmp142 = tmp141.to(tl.float32)
tmp143 = tmp137 + tmp142
tmp144 = tmp140 + tmp143
tmp145 = tmp126 * tmp126
tmp146 = tmp145.to(tl.float32)
tmp147 = tmp137 + tmp146
tmp148 = tmp144 + tmp147
tmp149 = tmp132 * tmp132
tmp150 = tmp149.to(tl.float32)
tmp151 = tmp137 + tmp150
tmp152 = tmp148 + tmp151
tmp154 = tmp6 == tmp5
tmp155 = tmp154.to(tl.int64)
tmp156 = tmp155.to(tl.float32)
tmp157 = tmp153 * tmp156
tmp158 = tmp157 * tmp14
tmp159 = tmp19 == tmp5
tmp160 = tmp159.to(tl.int64)
tmp161 = tmp160.to(tl.float32)
tmp162 = tmp153 * tmp161
tmp163 = tmp162 * tmp26
tmp164 = tmp158 + tmp163
tmp165 = tmp32 == tmp5
tmp166 = tmp165.to(tl.int64)
tmp167 = tmp166.to(tl.float32)
tmp168 = tmp153 * tmp167
tmp169 = tmp168 * tmp39
tmp170 = tmp164 + tmp169
tmp171 = tmp45 == tmp5
tmp172 = tmp171.to(tl.int64)
tmp173 = tmp172.to(tl.float32)
tmp174 = tmp153 * tmp173
tmp175 = tmp174 * tmp52
tmp176 = tmp170 + tmp175
tmp177 = tmp153 * tmp153
tmp178 = tmp155 * tmp155
tmp179 = tmp178.to(tl.float32)
tmp180 = tmp177 + tmp179
tmp181 = tmp160 * tmp160
tmp182 = tmp181.to(tl.float32)
tmp183 = tmp177 + tmp182
tmp184 = tmp180 + tmp183
tmp185 = tmp166 * tmp166
tmp186 = tmp185.to(tl.float32)
tmp187 = tmp177 + tmp186
tmp188 = tmp184 + tmp187
tmp189 = tmp172 * tmp172
tmp190 = tmp189.to(tl.float32)
tmp191 = tmp177 + tmp190
tmp192 = tmp188 + tmp191
tmp193 = 2.0
tmp194 = tmp54 * tmp193
tmp195 = 1.0
tmp196 = tmp194 + tmp195
tmp197 = tmp70 + tmp195
tmp198 = tmp196 / tmp197
tmp199 = tmp195 - tmp198
tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK])
tmp202 = tl.sum(tmp200, 1)[:, None]
tmp203 = tmp95 * tmp193
tmp204 = tmp203 + tmp195
tmp205 = tmp111 + tmp195
tmp206 = tmp204 / tmp205
tmp207 = tmp195 - tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = tl.sum(tmp208, 1)[:, None]
tmp211 = tmp136 * tmp193
tmp212 = tmp211 + tmp195
tmp213 = tmp152 + tmp195
tmp214 = tmp212 / tmp213
tmp215 = tmp195 - tmp214
tmp216 = tl.broadcast_to(tmp215, [XBLOCK, RBLOCK])
tmp218 = tl.sum(tmp216, 1)[:, None]
tmp219 = tmp176 * tmp193
tmp220 = tmp219 + tmp195
tmp221 = tmp192 + tmp195
tmp222 = tmp220 / tmp221
tmp223 = tmp195 - tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = tl.sum(tmp224, 1)[:, None]
tmp227 = 4.0
tmp228 = tmp202 / tmp227
tmp229 = 0.0
tmp230 = tmp228 + tmp229
tmp231 = tmp210 / tmp227
tmp232 = tmp230 + tmp231
tmp233 = tmp218 / tmp227
tmp234 = tmp232 + tmp233
tmp235 = tmp226 / tmp227
tmp236 = tmp234 + tmp235
tmp237 = 0.25
tmp238 = tmp236 * tmp237
tmp239 = tmp238 / tmp195
tmp240 = tmp239 * tmp195
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp240, None)
''', device_str='cuda')
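# The fused reduction kernel above evaluates the entire multi-class Dice loss
# for the (4, 4) inputs in a single launch: per-class one-hot comparison of
# the clamped labels, valid-mask (label != 255) weighting, the
# (2*num + smooth) / (den + smooth) ratio with smooth=1, per-class batch
# means, the division by num_classes (the 0.25 factor), and the final
# loss_weight=1.0 scaling.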
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [mul, ne, valid_mask, valid_mask_1, mul_1, sum_1, mul_2, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss, loss_1, total_loss, mul_3, valid_mask_2, mul_4, sum_3, mul_5, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_2, loss_3, total_loss_1, mul_6, valid_mask_3, mul_7, sum_5, mul_8, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_4, loss_5, total_loss_2, mul_9, valid_mask_4, mul_10, sum_7, mul_11, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_6, loss_7, total_loss_3, loss_8, loss_9, loss_10], Original ATen: [aten.mul, aten.ne, aten._to_copy, aten.view, aten.sum, aten.add, aten.pow, aten.div, aten.rsub, aten.mean]
triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2.run(buf14, buf1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg1_1
del buf1
return (buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
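# Illustrative values: for loss = torch.tensor([1., 2., 3.]),
# reduce_loss(loss, 'none') -> tensor([1., 2., 3.])
# reduce_loss(loss, 'mean') -> tensor(2.)
# reduce_loss(loss, 'sum')  -> tensor(6.)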
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
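# Illustrative values: with loss = torch.tensor([1., 2., 3.]) and
# avg_factor=2, reduction='mean' yields loss.sum() / 2 = tensor(3.),
# whereas avg_factor=None falls back to reduce_loss (plain mean = 2.).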
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
    else:
        import mmcv  # lazy import; assumed available for non-.npy files
        class_weight = mmcv.load(class_weight)
return class_weight
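# Illustrative (the file name is hypothetical): lists pass through unchanged,
# '.npy' paths load via numpy, and other strings go through mmcv.load:
# get_class_weight([1.0, 0.5])           -> [1.0, 0.5]
# get_class_weight('class_weight.npy')   -> np.load('class_weight.npy')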
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
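# Worked example of the formula above (smooth=1, exponent=2): with
# pred = target = valid_mask = torch.ones(1, 4),
# num = 2 * 4 + 1 = 9 and den = (4 + 4) + 1 = 9, so the loss is
# 1 - 9/9 = 0 -- perfect overlap scores zero Dice loss.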
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=
None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(pred[:, i], target[..., i],
valid_mask=valid_mask, smooth=smooth, exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
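# Note: with ignore_index=255 and num_classes=4, every class index 0..3
# differs from 255, so all four binary Dice terms are accumulated and the
# sum is divided by num_classes -- the same 0.25 factor that appears fused
# into the Triton reduction kernel above.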
class DiceLoss(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number used to compute the denominator
            value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        ignore_index (int | None): The label index to be ignored. Default: 255.
        loss_name (str, optional): Name of the loss item. If you want this loss
            item to be included in the backward graph, `loss_` must be the
            prefix of the name. Defaults to 'loss_dice'.
"""
def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight
=None, loss_weight=1.0, ignore_index=255, loss_name='loss_dice', **
kwards):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
self._loss_name = loss_name
def forward(self, pred, target, avg_factor=None, reduction_override=
None, **kwards):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(torch.clamp(target.long(), 0,
num_classes - 1), num_classes=num_classes)
valid_mask = (target != self.ignore_index).long()
loss = self.loss_weight * dice_loss(pred, one_hot_target,
valid_mask=valid_mask, reduction=reduction, avg_factor=
avg_factor, smooth=self.smooth, exponent=self.exponent,
class_weight=class_weight, ignore_index=self.ignore_index)
return loss
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
        included in the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
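# Minimal usage sketch (shapes follow get_inputs above):
# criterion = DiceLoss()
# loss = criterion(torch.rand(4, 4), torch.rand(4, 4))  # scalar tensor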
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp153 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp2 = tmp1.to(tl.int64)
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 3, tl.int64)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp6 == tmp3
tmp8 = tmp7.to(tl.int64)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 * tmp9
tmp11 = 255.0
tmp12 = tmp1 != tmp11
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp10 * tmp14
tmp17 = tmp16.to(tl.int64)
tmp18 = triton_helpers.maximum(tmp17, tmp3)
tmp19 = triton_helpers.minimum(tmp18, tmp5)
tmp20 = tmp19 == tmp3
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp0 * tmp22
tmp24 = tmp16 != tmp11
tmp25 = tmp24.to(tl.int64)
tmp26 = tmp25.to(tl.float32)
tmp27 = tmp23 * tmp26
tmp28 = tmp15 + tmp27
tmp30 = tmp29.to(tl.int64)
tmp31 = triton_helpers.maximum(tmp30, tmp3)
tmp32 = triton_helpers.minimum(tmp31, tmp5)
tmp33 = tmp32 == tmp3
tmp34 = tmp33.to(tl.int64)
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp0 * tmp35
tmp37 = tmp29 != tmp11
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38.to(tl.float32)
tmp40 = tmp36 * tmp39
tmp41 = tmp28 + tmp40
tmp43 = tmp42.to(tl.int64)
tmp44 = triton_helpers.maximum(tmp43, tmp3)
tmp45 = triton_helpers.minimum(tmp44, tmp5)
tmp46 = tmp45 == tmp3
tmp47 = tmp46.to(tl.int64)
tmp48 = tmp47.to(tl.float32)
tmp49 = tmp0 * tmp48
tmp50 = tmp42 != tmp11
tmp51 = tmp50.to(tl.int64)
tmp52 = tmp51.to(tl.float32)
tmp53 = tmp49 * tmp52
tmp54 = tmp41 + tmp53
tmp55 = tmp0 * tmp0
tmp56 = tmp8 * tmp8
tmp57 = tmp56.to(tl.float32)
tmp58 = tmp55 + tmp57
tmp59 = tmp21 * tmp21
tmp60 = tmp59.to(tl.float32)
tmp61 = tmp55 + tmp60
tmp62 = tmp58 + tmp61
tmp63 = tmp34 * tmp34
tmp64 = tmp63.to(tl.float32)
tmp65 = tmp55 + tmp64
tmp66 = tmp62 + tmp65
tmp67 = tmp47 * tmp47
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp55 + tmp68
tmp70 = tmp66 + tmp69
tmp72 = tl.full([1, 1], 1, tl.int64)
tmp73 = tmp6 == tmp72
tmp74 = tmp73.to(tl.int64)
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp71 * tmp75
tmp77 = tmp76 * tmp14
tmp78 = tmp19 == tmp72
tmp79 = tmp78.to(tl.int64)
tmp80 = tmp79.to(tl.float32)
tmp81 = tmp71 * tmp80
tmp82 = tmp81 * tmp26
tmp83 = tmp77 + tmp82
tmp84 = tmp32 == tmp72
tmp85 = tmp84.to(tl.int64)
tmp86 = tmp85.to(tl.float32)
tmp87 = tmp71 * tmp86
tmp88 = tmp87 * tmp39
tmp89 = tmp83 + tmp88
tmp90 = tmp45 == tmp72
tmp91 = tmp90.to(tl.int64)
tmp92 = tmp91.to(tl.float32)
tmp93 = tmp71 * tmp92
tmp94 = tmp93 * tmp52
tmp95 = tmp89 + tmp94
tmp96 = tmp71 * tmp71
tmp97 = tmp74 * tmp74
tmp98 = tmp97.to(tl.float32)
tmp99 = tmp96 + tmp98
tmp100 = tmp79 * tmp79
tmp101 = tmp100.to(tl.float32)
tmp102 = tmp96 + tmp101
tmp103 = tmp99 + tmp102
tmp104 = tmp85 * tmp85
tmp105 = tmp104.to(tl.float32)
tmp106 = tmp96 + tmp105
tmp107 = tmp103 + tmp106
tmp108 = tmp91 * tmp91
tmp109 = tmp108.to(tl.float32)
tmp110 = tmp96 + tmp109
tmp111 = tmp107 + tmp110
tmp113 = tl.full([1, 1], 2, tl.int64)
tmp114 = tmp6 == tmp113
tmp115 = tmp114.to(tl.int64)
tmp116 = tmp115.to(tl.float32)
tmp117 = tmp112 * tmp116
tmp118 = tmp117 * tmp14
tmp119 = tmp19 == tmp113
tmp120 = tmp119.to(tl.int64)
tmp121 = tmp120.to(tl.float32)
tmp122 = tmp112 * tmp121
tmp123 = tmp122 * tmp26
tmp124 = tmp118 + tmp123
tmp125 = tmp32 == tmp113
tmp126 = tmp125.to(tl.int64)
tmp127 = tmp126.to(tl.float32)
tmp128 = tmp112 * tmp127
tmp129 = tmp128 * tmp39
tmp130 = tmp124 + tmp129
tmp131 = tmp45 == tmp113
tmp132 = tmp131.to(tl.int64)
tmp133 = tmp132.to(tl.float32)
tmp134 = tmp112 * tmp133
tmp135 = tmp134 * tmp52
tmp136 = tmp130 + tmp135
tmp137 = tmp112 * tmp112
tmp138 = tmp115 * tmp115
tmp139 = tmp138.to(tl.float32)
tmp140 = tmp137 + tmp139
tmp141 = tmp120 * tmp120
tmp142 = tmp141.to(tl.float32)
tmp143 = tmp137 + tmp142
tmp144 = tmp140 + tmp143
tmp145 = tmp126 * tmp126
tmp146 = tmp145.to(tl.float32)
tmp147 = tmp137 + tmp146
tmp148 = tmp144 + tmp147
tmp149 = tmp132 * tmp132
tmp150 = tmp149.to(tl.float32)
tmp151 = tmp137 + tmp150
tmp152 = tmp148 + tmp151
tmp154 = tmp6 == tmp5
tmp155 = tmp154.to(tl.int64)
tmp156 = tmp155.to(tl.float32)
tmp157 = tmp153 * tmp156
tmp158 = tmp157 * tmp14
tmp159 = tmp19 == tmp5
tmp160 = tmp159.to(tl.int64)
tmp161 = tmp160.to(tl.float32)
tmp162 = tmp153 * tmp161
tmp163 = tmp162 * tmp26
tmp164 = tmp158 + tmp163
tmp165 = tmp32 == tmp5
tmp166 = tmp165.to(tl.int64)
tmp167 = tmp166.to(tl.float32)
tmp168 = tmp153 * tmp167
tmp169 = tmp168 * tmp39
tmp170 = tmp164 + tmp169
tmp171 = tmp45 == tmp5
tmp172 = tmp171.to(tl.int64)
tmp173 = tmp172.to(tl.float32)
tmp174 = tmp153 * tmp173
tmp175 = tmp174 * tmp52
tmp176 = tmp170 + tmp175
tmp177 = tmp153 * tmp153
tmp178 = tmp155 * tmp155
tmp179 = tmp178.to(tl.float32)
tmp180 = tmp177 + tmp179
tmp181 = tmp160 * tmp160
tmp182 = tmp181.to(tl.float32)
tmp183 = tmp177 + tmp182
tmp184 = tmp180 + tmp183
tmp185 = tmp166 * tmp166
tmp186 = tmp185.to(tl.float32)
tmp187 = tmp177 + tmp186
tmp188 = tmp184 + tmp187
tmp189 = tmp172 * tmp172
tmp190 = tmp189.to(tl.float32)
tmp191 = tmp177 + tmp190
tmp192 = tmp188 + tmp191
tmp193 = 2.0
tmp194 = tmp54 * tmp193
tmp195 = 1.0
tmp196 = tmp194 + tmp195
tmp197 = tmp70 + tmp195
tmp198 = tmp196 / tmp197
tmp199 = tmp195 - tmp198
tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK])
tmp202 = tl.sum(tmp200, 1)[:, None]
tmp203 = tmp95 * tmp193
tmp204 = tmp203 + tmp195
tmp205 = tmp111 + tmp195
tmp206 = tmp204 / tmp205
tmp207 = tmp195 - tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = tl.sum(tmp208, 1)[:, None]
tmp211 = tmp136 * tmp193
tmp212 = tmp211 + tmp195
tmp213 = tmp152 + tmp195
tmp214 = tmp212 / tmp213
tmp215 = tmp195 - tmp214
tmp216 = tl.broadcast_to(tmp215, [XBLOCK, RBLOCK])
tmp218 = tl.sum(tmp216, 1)[:, None]
tmp219 = tmp176 * tmp193
tmp220 = tmp219 + tmp195
tmp221 = tmp192 + tmp195
tmp222 = tmp220 / tmp221
tmp223 = tmp195 - tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = tl.sum(tmp224, 1)[:, None]
tmp227 = 4.0
tmp228 = tmp202 / tmp227
tmp229 = 0.0
tmp230 = tmp228 + tmp229
tmp231 = tmp210 / tmp227
tmp232 = tmp230 + tmp231
tmp233 = tmp218 / tmp227
tmp234 = tmp232 + tmp233
tmp235 = tmp226 / tmp227
tmp236 = tmp234 + tmp235
tmp237 = 0.25
tmp238 = tmp236 * tmp237
tmp239 = tmp238 / tmp195
tmp240 = tmp239 * tmp195
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp240, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2[grid
(1)](buf14, buf1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1
)
del arg1_1
del buf1
return buf14,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
    else:
        import mmcv  # lazy import; assumed available for non-.npy files
        class_weight = mmcv.load(class_weight)
return class_weight
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=
None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(pred[:, i], target[..., i],
valid_mask=valid_mask, smooth=smooth, exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
class DiceLossNew(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number used to compute the denominator
            value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        ignore_index (int | None): The label index to be ignored. Default: 255.
        loss_name (str, optional): Name of the loss item. If you want this loss
            item to be included in the backward graph, `loss_` must be the
            prefix of the name. Defaults to 'loss_dice'.
"""
def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight
=None, loss_weight=1.0, ignore_index=255, loss_name='loss_dice', **
kwards):
super(DiceLossNew, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
self._loss_name = loss_name
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
        included in the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ImportPaddle/APCNet | DiceLoss | false | 2,369 | [
"MIT"
] | 0 | 68ade1f83827b4cdd60ee4b6ac25454397100316 | https://github.com/ImportPaddle/APCNet/tree/68ade1f83827b4cdd60ee4b6ac25454397100316 | import functools
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
    else:
        import mmcv  # lazy import; assumed available for non-.npy files
        class_weight = mmcv.load(class_weight)
return class_weight
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=
None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i !=
# ... truncated (>4000 chars) for memory efficiency |
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py
# Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_qk => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
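# Sketch of what the clone kernel above does (reading the index math): it
# adds the 4-element bias (in_ptr1) while permuting the linear-projection
# output into the contiguous layout the batched Q @ K^T matmul expects.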
# kernel path: runs/run_shard_7/inductor_cache/ss/csssqqlybvm2dlovzxhxorvbhyaj46oqebdryly2s5obgs7rshwq.py
# Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, div_1, exp, sub, sum_1
# mul => mul
# scaled_attention_logits_1 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_10, -1000000000.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %mul), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_add_mul_1 = async_compile.triton('triton_per_fused__softmax_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = -1000000000.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, float("-inf"))
tmp8 = triton_helpers.max2(tmp7, 1)[:, None]
tmp9 = tmp4 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask)
''', device_str='cuda')
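# Note (added commentary): this kernel fuses the additive attention mask
# (mask * -1e9) into a numerically stable softmax over each length-16 row:
# subtract the row max, exponentiate, normalize by the row sum. The eager
# 1/sqrt(dk) scaling does not appear because depth = d_model / num_heads = 1
# for these inputs, so the scale folds to 1.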
# kernel path: runs/run_shard_7/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ok/cokamvfj3z4xuz3jmalftfns3huimimr3c4gzm52vaybmdliglu4.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_12), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
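# Note (added commentary): this kernel applies the output projection bias in
# place; element i of the flat (256,) buffer receives bias[i % 4], i.e. one
# bias value per d_model slot.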
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_per_fused__softmax_add_mul_1.run(buf5, primals_10, buf8, 256, 16, grid=grid(256), stream=stream0)
del buf5
del primals_10
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf13, primals_12, 256, grid=grid(256), stream=stream0)
del primals_12
return (buf13, buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def scaled_dot_product_attention(q, k, v, mask):
"""
q: query = (..., seq_len_q, depth)
k: key = (..., seq_len_k, depth)
v: value = (..., seq_len_v, depth_v)
mask: float tensor with shape broadcastable to
(..., seq_len_q, seq_len_k)
    note: seq_len_k must equal seq_len_v
Returns:
output, attention_weights
"""
matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2))
dk = torch.tensor(k.shape[-1], dtype=torch.float32)
scaled_attention_logits = matmul_qk / torch.sqrt(dk)
if mask is not None:
scaled_attention_logits += mask * -1000000000.0
attention_weights = F.softmax(scaled_attention_logits, dim=-1)
output = torch.matmul(attention_weights, v)
return output, attention_weights
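# A minimal, hypothetical shape check for the function above (illustration
# only, not part of the original code): with mask=None, q of shape
# (2, 3, 5, 8) and k = v of shape (2, 3, 7, 8), the output has shape
# (2, 3, 5, 8) and attention_weights.sum(-1) is all ones, since softmax
# normalizes each row of the (2, 3, 5, 7) logits.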
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = nn.Linear(d_model, d_model)
self.wk = nn.Linear(d_model, d_model)
self.wv = nn.Linear(d_model, d_model)
self.linear = nn.Linear(d_model, d_model)
def split_heads(self, x, batch_size):
"""
Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
x : (batch_size, seq_len, d_model)
batch_size : int
Returns:
x : (batch_size, num_heads, seq_len, depth)
"""
x = x.view(batch_size, -1, self.num_heads, self.depth)
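        # e.g. with the harness sizes below (d_model=4, num_heads=4, depth=1),
        # a (4, 4, 4, 4) projection is viewed as (4, 16, 4, 1) here and
        # becomes (4, 4, 16, 1) after the permute on the next line.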
return x.permute(0, 2, 1, 3)
def forward(self, v, k, q, mask):
batch_size = q.size()[0]
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention, attention_weights = scaled_dot_product_attention(q,
k, v, mask)
scaled_attention = scaled_attention.permute(0, 2, 1, 3)
concat_attention = scaled_attention.reshape(batch_size, -1, self.
d_model)
output = self.linear(concat_attention)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 16, 16])]
def get_init_inputs():
return [[], {'d_model': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = -1000000000.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, float('-inf'))
tmp8 = triton_helpers.max2(tmp7, 1)[:, None]
tmp9 = tmp4 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_add_mul_1[grid(256)](buf5, primals_10,
buf8, 256, 16, XBLOCK=128, num_warps=8, num_stages=1)
del buf5
del primals_10
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
out=buf10)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0)
del buf12
triton_poi_fused_add_3[grid(256)](buf13, primals_12, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_12
return buf13, buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
def scaled_dot_product_attention(q, k, v, mask):
"""
q: query = (..., seq_len_q, depth)
k: key = (..., seq_len_k, depth)
v: value = (..., seq_len_v, depth_v)
mask: float tensor with shape broadcastable to
(..., seq_len_q, seq_len_k)
    note: seq_len_k must equal seq_len_v
Returns:
output, attention_weights
"""
matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2))
dk = torch.tensor(k.shape[-1], dtype=torch.float32)
scaled_attention_logits = matmul_qk / torch.sqrt(dk)
if mask is not None:
scaled_attention_logits += mask * -1000000000.0
attention_weights = F.softmax(scaled_attention_logits, dim=-1)
output = torch.matmul(attention_weights, v)
return output, attention_weights
class MultiHeadAttentionNew(nn.Module):
def __init__(self, d_model, num_heads):
super(MultiHeadAttentionNew, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = nn.Linear(d_model, d_model)
self.wk = nn.Linear(d_model, d_model)
self.wv = nn.Linear(d_model, d_model)
self.linear = nn.Linear(d_model, d_model)
def split_heads(self, x, batch_size):
"""
Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
x : (batch_size, seq_len, d_model)
batch_size : int
Returns:
x : (batch_size, num_heads, seq_len, depth)
"""
x = x.view(batch_size, -1, self.num_heads, self.depth)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.wq.weight
primals_3 = self.wq.bias
primals_4 = self.wk.weight
primals_5 = self.wk.bias
primals_7 = self.wv.weight
primals_8 = self.wv.bias
primals_11 = self.linear.weight
primals_12 = self.linear.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1]
| IanYHWu/transformers-for-translation | MultiHeadAttention | false | 2,370 | [
"MIT"
] | 0 | b763e58deb2263507eecd2eb569fbaf5c1dd9df8 | https://github.com/IanYHWu/transformers-for-translation/tree/b763e58deb2263507eecd2eb569fbaf5c1dd9df8 | import torch
import torch.nn as nn
import torch.nn.functional as F
def scaled_dot_product_attention(q, k, v, mask):
"""
q: query = (..., seq_len_q, depth)
k: key = (..., seq_len_k, depth)
v: value = (..., seq_len_v, depth_v)
mask: float tensor with shape broadcastable to
(..., seq_len_q, seq_len_k)
    note: seq_len_k must equal seq_len_v
Returns:
output, attention_weights
"""
matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2))
dk = torch.tensor(k.shape[-1], dtype=torch.float32)
scaled_attention_logits = matmul_qk / torch.sqrt(dk)
if mask is not None:
scaled_attention_logits += mask * -1000000000.0
attention_weights = F.softmax(scaled_attention_logits, dim=-1)
output = torch.matmul(attention_weights, v)
return output, attention_weights
class Model(nn.Module):
def __init__(self, d_model, num_heads):
super().__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = nn.Linear(d_model, d_model)
self.wk = nn.Linear(d_model, d_model)
self.wv = nn.Linear(d_model, d_model)
self.linear = nn.Linear(d_model, d_model)
def split_heads(self, x, batch_size):
"""
Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
x : (batch_size, seq_len, d_model)
batch_size : int
Returns:
x : (batch_size, num_heads, seq_len, depth)
"""
x = x.view(batch_size, -1, self.num_heads, self.depth)
return x.permute(0, 2, 1, 3)
def forward(self, v, k, q, mask):
batch_size = q.size()[0]
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention, attention_weights = scaled_dot_product_attention(q,
k, v, mask)
scaled_attention = scaled_attention.permute(0, 2, 1, 3)
concat_attention = scaled_attention.reshape(batch_size, -1, self.
d_model)
output = self.linear(concat_attention)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 16, 16])]
def get_init_inputs():
return [4, 4]
|
BilinearUpsample | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/z2/cz24gi4s7xzyox5sbvrpri4ak7fy6xgopry5wbciybyk7nkbnbwl.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
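# Note (added commentary): the kernel above inlines bilinear resampling with
# align_corners=False. Each output pixel maps to a source coordinate
# src = (dst + 0.5) * scale - 0.5 (scale = 1.0 for this input), the
# coordinate is clamped into bounds, and the four neighboring input values
# are blended with the fractional weights along x, then along y.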
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Union
from typing import List
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class BilinearUpsample(nn.Module):
"""
Overview:
        Upsamples the input by the given member variable scale_factor using bilinear mode
Interface:
forward
"""
def __init__(self, scale_factor: 'Union[float, List[float]]') ->None:
"""
Overview:
Init class BilinearUpsample
Arguments:
- scale_factor (:obj:`Union[float, List[float]]`): multiplier for spatial size
"""
super(BilinearUpsample, self).__init__()
self.scale_factor = scale_factor
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""
Overview:
Return the upsampled input
Arguments:
- x (:obj:`torch.Tensor`): the input tensor
Returns:
- upsample(:obj:`torch.Tensor`): the upsampled input tensor
"""
return F.interpolate(x, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale_factor': 1.0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from typing import Union
from typing import List
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf2, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf2,
class BilinearUpsampleNew(nn.Module):
"""
Overview:
        Upsamples the input by the given member variable scale_factor using bilinear mode
Interface:
forward
"""
def __init__(self, scale_factor: 'Union[float, List[float]]') ->None:
"""
Overview:
Init class BilinearUpsample
Arguments:
- scale_factor (:obj:`Union[float, List[float]]`): multiplier for spatial size
"""
super(BilinearUpsampleNew, self).__init__()
self.scale_factor = scale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hcnaeg/DI-engine | BilinearUpsample | false | 2,371 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
from typing import Union
from typing import List
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
"""
Overview:
        Upsamples the input by the given member variable scale_factor using bilinear mode
Interface:
forward
"""
def __init__(self, scale_factor: 'Union[float, List[float]]') ->None:
"""
Overview:
Init class BilinearUpsample
Arguments:
- scale_factor (:obj:`Union[float, List[float]]`): multiplier for spatial size
"""
super().__init__()
self.scale_factor = scale_factor
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""
Overview:
Return the upsampled input
Arguments:
- x (:obj:`torch.Tensor`): the input tensor
Returns:
- upsample(:obj:`torch.Tensor`): the upsampled input tensor
"""
return F.interpolate(x, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1.0]
|
SpatialGatherModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iv/civr7hz7pwb7nd5q352sqsjvxezkx6m6jnyztaygkt2ugewh5ejx.py
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probs_1 => div, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask)
''', device_str='cuda')
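# Note (added commentary): each program above applies the max-subtracted
# softmax to one of the 16 (batch * num_classes) rows of length 16 (H * W);
# the `tmp0 * 1.0` multiply is the folded `self.scale` factor from the eager
# module.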
# kernel path: runs/run_shard_7/inductor_cache/vq/cvqpprnukykv7fb6t2uveui44qrapemorby5j3fnnfeymwpqwe63.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
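# Note (added commentary): the clone kernel above just materializes the
# (0, 2, 1) permute of the bmm result into a contiguous buffer, matching the
# .contiguous() call in the eager module.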
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1, ocr_context], Original ATen: [aten._softmax, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3)
del arg1_1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
class SpatialGatherModule(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super(SpatialGatherModule, self).__init__()
self.scale = scale
def forward(self, feats, probs):
"""Forward function."""
batch_size, num_classes, _height, _width = probs.size()
channels = feats.size(1)
probs = probs.view(batch_size, num_classes, -1)
feats = feats.view(batch_size, channels, -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3)
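        # ocr_context[b, :, k, 0] is the probs-weighted average of the
        # feature vectors assigned to class k; the matmul above is equivalent
        # to torch.einsum('bkn,bnc->bkc', probs, feats).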
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64,
1, 16), 0), out=buf3)
del arg1_1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0),
class SpatialGatherModuleNew(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super(SpatialGatherModuleNew, self).__init__()
self.scale = scale
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ImportPaddle/APCNet | SpatialGatherModule | false | 2,372 | [
"MIT"
] | 0 | 68ade1f83827b4cdd60ee4b6ac25454397100316 | https://github.com/ImportPaddle/APCNet/tree/68ade1f83827b4cdd60ee4b6ac25454397100316 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._C
import torch.serialization
class Model(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, feats, probs):
"""Forward function."""
batch_size, num_classes, _height, _width = probs.size()
channels = feats.size(1)
probs = probs.view(batch_size, num_classes, -1)
feats = feats.view(batch_size, channels, -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3)
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1.0]
|
LabelSmoothCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/le/clewmq2oyakpojeemfsrrjq5tneb2unj5om75r32lnu3wfwo4lbd.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# logits => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u4/cu4kfpb4bm4o7xwrvdjhv3pjm46tbhctvi7mgrront5rs3v44s3v.py
# Topologically Sorted Source Nodes: [logits, scatter_, mul, sum_1, neg, truediv], Original ATen: [aten._log_softmax, aten.scatter, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# logits => exp, log, sub_1, sum_1
# mul => mul
# neg => neg
# scatter_ => scatter_upon_const_tensor
# sum_1 => sum_2
# truediv => div
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 1.3333333333333333, dtype: torch.float32, dim: 1, selector: %unsqueeze, val: -0.33333333333333326})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %scatter_upon_const_tensor), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 4), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = r0
tmp16 = tmp14 == tmp15
tmp17 = -0.33333333333333326
tmp18 = 1.3333333333333333
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tmp13 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = -tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
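# Note on the constants fused above (they come from the eager module below):
# with ratio=4 and N=4 classes, val = ratio / (N - 1) = 1.3333... is written
# to every off-target class, 1 - val = -0.3333... is scattered onto the label
# position, and 0.25 = 1 / B folds in the final division by the batch size.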
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [logits, scatter_, mul, sum_1, neg, truediv], Original ATen: [aten._log_softmax, aten.scatter, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1.run(buf2, buf0, arg1_1, 1, 16, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def one_hot(val: 'torch.LongTensor', num: 'int', num_first: 'bool'=False
) ->torch.FloatTensor:
"""
Overview:
Convert a ``torch.LongTensor`` to one hot encoding.
This implementation can be slightly faster than ``torch.nn.functional.one_hot``
Arguments:
- val (:obj:`torch.LongTensor`): each element contains the state to be encoded, the range should be [0, num-1]
- num (:obj:`int`): number of states of the one hot encoding
        - num_first (:obj:`bool`): If ``num_first`` is False, the one hot encoding is added as the last dimension; \\
            otherwise as the first dimension.
Returns:
- one_hot (:obj:`torch.FloatTensor`)
Example:
>>> one_hot(2*torch.ones([2,2]).long(),3)
tensor([[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 1.],
[0., 0., 1.]]])
>>> one_hot(2*torch.ones([2,2]).long(),3,num_first=True)
        tensor([[[0., 0.], [0., 0.]],
        [[0., 0.], [0., 0.]],
        [[1., 1.], [1., 1.]]])
"""
assert isinstance(val, torch.Tensor), type(val)
assert val.dtype == torch.long
assert len(val.shape) >= 1
old_shape = val.shape
val_reshape = val.reshape(-1, 1)
ret = torch.zeros(val_reshape.shape[0], num, device=val.device)
index_neg_one = torch.eq(val_reshape, -1).long()
if index_neg_one.sum() != 0:
val_reshape = torch.where(val_reshape != -1, val_reshape, torch.
zeros(val_reshape.shape, device=val.device).long())
try:
ret.scatter_(1, val_reshape, 1)
if index_neg_one.sum() != 0:
ret = ret * (1 - index_neg_one)
except RuntimeError:
raise RuntimeError('value: {}\nnum: {}\t:val_shape: {}\n'.format(
val_reshape, num, val_reshape.shape))
if num_first:
return ret.permute(1, 0).reshape(num, *old_shape)
else:
return ret.reshape(*old_shape, num)
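# Note: label value -1 is treated as "no class": the scatter runs on a
# zero-clamped copy, and `ret * (1 - index_neg_one)` then zeroes those rows.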
class LabelSmoothCELoss(nn.Module):
"""
Overview:
Label smooth cross entropy loss.
Interfaces:
forward
"""
def __init__(self, ratio: 'float') ->None:
super().__init__()
self.ratio = ratio
def forward(self, logits: 'torch.Tensor', labels: 'torch.LongTensor'
) ->torch.Tensor:
"""
Overview:
Calculate label smooth cross entropy loss.
Arguments:
- logits (:obj:`torch.Tensor`): Predicted logits.
- labels (:obj:`torch.LongTensor`): Ground truth.
Returns:
- loss (:obj:`torch.Tensor`): Calculated loss.
"""
B, N = logits.shape
val = float(self.ratio) / (N - 1)
one_hot = torch.full_like(logits, val)
one_hot.scatter_(1, labels.unsqueeze(1), 1 - val)
logits = F.log_softmax(logits, dim=1)
return -torch.sum(logits * one_hot.detach()) / B
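def _label_smooth_sketch():
    # Hedged numeric sketch, not from the original repo: spells out the same
    # loss without scatter_. The smoothed target puts (1 - val) on the label
    # and val on every other class, with val = ratio / (N - 1).
    torch.manual_seed(0)
    logits = torch.rand(4, 4)
    labels = torch.ones(4, dtype=torch.long)
    ratio, N = 4.0, 4
    val = ratio / (N - 1)
    smoothed = torch.full_like(logits, val)
    smoothed[torch.arange(4), labels] = 1 - val
    ref = -torch.sum(F.log_softmax(logits, dim=1) * smoothed) / 4
    assert torch.allclose(ref, LabelSmoothCELoss(ratio)(logits, labels))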
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'ratio': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = r0
tmp16 = tmp14 == tmp15
tmp17 = -0.33333333333333326
tmp18 = 1.3333333333333333
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tmp13 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = -tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_scatter_sum_1[grid(1)](buf2,
buf0, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
def one_hot(val: 'torch.LongTensor', num: 'int', num_first: 'bool'=False
) ->torch.FloatTensor:
"""
Overview:
Convert a ``torch.LongTensor`` to one hot encoding.
This implementation can be slightly faster than ``torch.nn.functional.one_hot``
Arguments:
- val (:obj:`torch.LongTensor`): each element contains the state to be encoded, the range should be [0, num-1]
- num (:obj:`int`): number of states of the one hot encoding
        - num_first (:obj:`bool`): If ``num_first`` is False, the one hot encoding is added as the last dimension; \\
            otherwise as the first dimension.
Returns:
- one_hot (:obj:`torch.FloatTensor`)
Example:
>>> one_hot(2*torch.ones([2,2]).long(),3)
tensor([[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 1.],
[0., 0., 1.]]])
>>> one_hot(2*torch.ones([2,2]).long(),3,num_first=True)
        tensor([[[0., 0.], [0., 0.]],
        [[0., 0.], [0., 0.]],
        [[1., 1.], [1., 1.]]])
"""
assert isinstance(val, torch.Tensor), type(val)
assert val.dtype == torch.long
assert len(val.shape) >= 1
old_shape = val.shape
val_reshape = val.reshape(-1, 1)
ret = torch.zeros(val_reshape.shape[0], num, device=val.device)
index_neg_one = torch.eq(val_reshape, -1).long()
if index_neg_one.sum() != 0:
val_reshape = torch.where(val_reshape != -1, val_reshape, torch.
zeros(val_reshape.shape, device=val.device).long())
try:
ret.scatter_(1, val_reshape, 1)
if index_neg_one.sum() != 0:
ret = ret * (1 - index_neg_one)
except RuntimeError:
raise RuntimeError('value: {}\nnum: {}\t:val_shape: {}\n'.format(
val_reshape, num, val_reshape.shape))
if num_first:
return ret.permute(1, 0).reshape(num, *old_shape)
else:
return ret.reshape(*old_shape, num)
class LabelSmoothCELossNew(nn.Module):
"""
Overview:
Label smooth cross entropy loss.
Interfaces:
forward
"""
def __init__(self, ratio: 'float') ->None:
super().__init__()
self.ratio = ratio
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Hcnaeg/DI-engine | LabelSmoothCELoss | false | 2,373 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def one_hot(val: 'torch.LongTensor', num: 'int', num_first: 'bool'=False
) ->torch.FloatTensor:
"""
Overview:
Convert a ``torch.LongTensor`` to one hot encoding.
This implementation can be slightly faster than ``torch.nn.functional.one_hot``
Arguments:
- val (:obj:`torch.LongTensor`): each element contains the state to be encoded, the range should be [0, num-1]
- num (:obj:`int`): number of states of the one hot encoding
        - num_first (:obj:`bool`): If ``num_first`` is False, the one hot encoding is added as the last dimension; \\
            otherwise as the first dimension.
Returns:
- one_hot (:obj:`torch.FloatTensor`)
Example:
>>> one_hot(2*torch.ones([2,2]).long(),3)
tensor([[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 1.],
[0., 0., 1.]]])
>>> one_hot(2*torch.ones([2,2]).long(),3,num_first=True)
        tensor([[[0., 0.], [0., 0.]],
        [[0., 0.], [0., 0.]],
        [[1., 1.], [1., 1.]]])
"""
assert isinstance(val, torch.Tensor), type(val)
assert val.dtype == torch.long
assert len(val.shape) >= 1
old_shape = val.shape
val_reshape = val.reshape(-1, 1)
ret = torch.zeros(val_reshape.shape[0], num, device=val.device)
index_neg_one = torch.eq(val_reshape, -1).long()
if index_neg_one.sum() != 0:
val_reshape = torch.where(val_reshape != -1, val_reshape, torch.
zeros(val_reshape.shape, device=val.device).long())
try:
ret.scatter_(1, val_reshape, 1)
if index_neg_one.sum() != 0:
ret = ret * (1 - index_neg_one)
except RuntimeError:
raise RuntimeError('value: {}\nnum: {}\t:val_shape: {}\n'.format(
val_reshape, num, val_reshape.shape))
if num_first:
return ret.permute(1, 0).reshape(num, *old_shape)
else:
return ret.reshape(*old_shape, num)
class Model(nn.Module):
"""
Overview:
Label smooth cross entropy loss.
Interfaces:
forward
"""
def __init__(self, ratio: 'float') ->None:
super().__init__()
self.ratio = ratio
def forward(self, logits: 'torch.Tensor', labels: 'torch.LongTensor'
) ->torch.Tensor:
"""
Overview:
Calculate label smooth cross entropy loss.
Arguments:
- logits (:obj:`torch.Tensor`): Predicted logits.
- labels (:obj:`torch.LongTensor`): Ground truth.
Returns:
- loss (:obj:`torch.Tensor`): Calculated loss.
"""
B, N = logits.shape
val = float(self.ratio) / (N - 1)
one_hot = torch.full_like(logits, val)
one_hot.scatter_(1, labels.unsqueeze(1), 1 - val)
logits = F.log_softmax(logits, dim=1)
return -torch.sum(logits * one_hot.detach()) / B
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# predict => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# predict => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
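# The two kernels above implement a numerically stable softmax over dim=1
# (the 4 channels): the first subtracts the per-(batch, spatial) channel
# maximum before exponentiating, the second normalizes by the channel sum;
# x0 indexes the 16 spatial positions and x2 the batch element.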
# kernel path: runs/run_shard_7/inductor_cache/7e/c7e5xjhhnst5z5rbsmctwrwgiwdqyubsrbgy2ujvvf6nflma2ws7.py
# Topologically Sorted Source Nodes: [mul, sum_1, pow_1, pow_2, add_1, sum_2], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_2
# sum_2 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {})
triton_per_fused_add_mul_pow_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bf/cbfvj76s5xemb4bs7kwt3su5gmofkie26hpppsikx7ik7sbfiunm.py
# Topologically Sorted Source Nodes: [mul_3, sum_10, pow_7, pow_8, add_11, sum_11], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
# Source node to ATen node mapping:
# add_11 => add_13
# mul_3 => mul_3
# pow_7 => pow_7
# pow_8 => pow_8
# sum_10 => sum_11
# sum_11 => sum_12
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %view_7), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_6, 2), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_7, 2), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_7, %pow_8), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_13, [1]), kwargs = {})
triton_per_fused_add_mul_pow_sum_3 = async_compile.triton('triton_per_fused_add_mul_pow_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u6/cu6mzldqxg26sec4gcx3r552m2kvchtopdml3edcvo2in2ct7bjg.py
# Topologically Sorted Source Nodes: [mul_1, sum_4, pow_3, pow_4, add_5, sum_5], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
# Source node to ATen node mapping:
# add_5 => add_5
# mul_1 => mul_1
# pow_3 => pow_3
# pow_4 => pow_4
# sum_4 => sum_5
# sum_5 => sum_6
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_3), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_5, [1]), kwargs = {})
triton_per_fused_add_mul_pow_sum_4 = async_compile.triton('triton_per_fused_add_mul_pow_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6m/c6mgqj2hrkokpnq6dgwmadtxpefexwavsviyw5qzkfpmafiwfc3s.py
# Topologically Sorted Source Nodes: [mul_2, sum_7, pow_5, pow_6, add_8, sum_8], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
# Source node to ATen node mapping:
# add_8 => add_9
# mul_2 => mul_2
# pow_5 => pow_5
# pow_6 => pow_6
# sum_7 => sum_8
# sum_8 => sum_9
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %view_5), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_4, 2), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_5, 2), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_9, [1]), kwargs = {})
triton_per_fused_add_mul_pow_sum_5 = async_compile.triton('triton_per_fused_add_mul_pow_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_sum_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
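# Kernels 2-5 above are four instances of the same per-channel reduction:
# each reads one 16-element channel slice of every sample's 64-float block
# (element offsets 0, 48, 16 and 32 correspond to channels 0, 3, 1 and 2)
# and emits per-sample sum(p * t) and sum(p**2 + t**2) -- the dice numerator
# and denominator terms for p=2, before smoothing.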
# kernel path: runs/run_shard_7/inductor_cache/sc/cscwugalyy5gfd5tjyfzwyjpjqitrek6etl7cganxqccwk6c4cnj.py
# Topologically Sorted Source Nodes: [num, den, truediv, loss, dice_loss, total_loss, num_1, den_1, truediv_1, loss_1, dice_loss_1, total_loss_1, num_2, den_2, truediv_2, loss_2, dice_loss_2, total_loss_2, num_3, den_3, truediv_3, loss_3, dice_loss_3, total_loss_3, truediv_4], Original ATen: [aten.add, aten.div, aten.rsub, aten.sum]
# Source node to ATen node mapping:
# den => add_2
# den_1 => add_6
# den_2 => add_10
# den_3 => add_14
# dice_loss => sum_4
# dice_loss_1 => sum_7
# dice_loss_2 => sum_10
# dice_loss_3 => sum_13
# loss => sub_1
# loss_1 => sub_2
# loss_2 => sub_3
# loss_3 => sub_4
# num => add
# num_1 => add_4
# num_2 => add_8
# num_3 => add_12
# total_loss => add_3
# total_loss_1 => add_7
# total_loss_2 => add_11
# total_loss_3 => add_15
# truediv => div_1
# truediv_1 => div_2
# truediv_2 => div_3
# truediv_3 => div_4
# truediv_4 => div_5
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, 1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_6, 1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %sum_7), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, 1), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 1), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_3), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %sum_10), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, 1), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_12, 1), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_4,), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %sum_13), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {})
triton_per_fused_add_div_rsub_sum_6 = async_compile.triton('triton_per_fused_add_div_rsub_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {9: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=(9,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_rsub_sum_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_rsub_sum_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp12 = tl.load(in_ptr3 + (r0), None)
tmp19 = tl.load(in_ptr4 + (r0), None)
tmp21 = tl.load(in_ptr5 + (r0), None)
tmp28 = tl.load(in_ptr6 + (r0), None)
tmp30 = tl.load(in_ptr7 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 / tmp4
tmp6 = tmp1 - tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp1
tmp13 = tmp12 + tmp1
tmp14 = tmp11 / tmp13
tmp15 = tmp1 - tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp19 + tmp1
tmp22 = tmp21 + tmp1
tmp23 = tmp20 / tmp22
tmp24 = tmp1 - tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp29 = tmp28 + tmp1
tmp31 = tmp30 + tmp1
tmp32 = tmp29 / tmp31
tmp33 = tmp1 - tmp32
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.sum(tmp34, 1)[:, None]
tmp37 = 0.0
tmp38 = tmp9 + tmp37
tmp39 = tmp38 + tmp18
tmp40 = tmp39 + tmp27
tmp41 = tmp40 + tmp36
tmp42 = 0.25
tmp43 = tmp41 * tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
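# The reduction above finishes the dice loss in a single launch: per class it
# forms (num + 1) / (den + 1) (smooth=1), takes 1 - ratio, sums over the
# batch (reduction='sum'), accumulates all four classes and scales by
# 0.25 = 1 / num_classes.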
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_1, pow_1, pow_2, add_1, sum_2], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
triton_per_fused_add_mul_pow_sum_2.run(buf1, arg1_1, buf2, buf3, 4, 16, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf12 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_3, sum_10, pow_7, pow_8, add_11, sum_11], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
triton_per_fused_add_mul_pow_sum_3.run(buf1, arg1_1, buf11, buf12, 4, 16, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf6 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, sum_4, pow_3, pow_4, add_5, sum_5], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
triton_per_fused_add_mul_pow_sum_4.run(buf1, arg1_1, buf5, buf6, 4, 16, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, sum_7, pow_5, pow_6, add_8, sum_8], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
triton_per_fused_add_mul_pow_sum_5.run(buf1, arg1_1, buf8, buf9, 4, 16, grid=grid(4), stream=stream0)
del arg1_1
del buf1
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [num, den, truediv, loss, dice_loss, total_loss, num_1, den_1, truediv_1, loss_1, dice_loss_1, total_loss_1, num_2, den_2, truediv_2, loss_2, dice_loss_2, total_loss_2, num_3, den_3, truediv_3, loss_3, dice_loss_3, total_loss_3, truediv_4], Original ATen: [aten.add, aten.div, aten.rsub, aten.sum]
triton_per_fused_add_div_rsub_sum_6.run(buf14, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, 1, 4, grid=grid(1), stream=stream0)
del buf11
del buf12
del buf2
del buf3
del buf5
del buf6
del buf8
del buf9
return (buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
predict: A tensor of shape [N, *]
        target: A tensor of the same shape as predict
reduction: Reduction method to apply, return mean over batch if 'mean',
return sum if 'sum', return a tensor of shape [N,] if 'none'
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2, reduction='sum'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
assert predict.shape[0] == target.shape[0
], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1
) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
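def _binary_dice_sketch():
    # Hedged numeric sketch, not from the original repo: for a 0/1-valued
    # prediction that matches its target exactly, num = s + smooth and
    # den = 2*s + smooth with s = sum(x), so the loss is 1 - (s+1)/(2s+1).
    x = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
    s = x.sum()
    assert torch.allclose(BinaryDiceLoss()(x, x), 1 - (s + 1) / (2 * s + 1))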
class DiceLoss(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A tensor of shape [N, C, *]
        target: A tensor of the same shape as predict
other args pass to BinaryDiceLoss
Return:
same as BinaryDiceLoss
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(DiceLoss, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert predict.shape == target.shape, 'predict & target shape do not match'
dice = BinaryDiceLoss(**self.kwargs)
total_loss = 0
predict = F.softmax(predict, dim=1)
for i in range(target.shape[1]):
if i != self.ignore_index:
dice_loss = dice(predict[:, i], target[:, i])
if self.weight is not None:
assert self.weight.shape[0] == target.shape[1
], 'Expect weight shape [{}], get[{}]'.format(target
.shape[1], self.weight.shape[0])
                    dice_loss *= self.weight[i]
total_loss += dice_loss
return total_loss / target.shape[1]
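# Note: with the empty kwargs from get_init_inputs, BinaryDiceLoss defaults
# to reduction='sum', so total_loss / target.shape[1] averages the four
# per-class dice losses -- matching the fused reductions in the kernels above.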
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
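    # Pass 2 of the softmax: divide each exponential by the channel-wise sum.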
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
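    # Per-class fused reduction for class slice 0: out_ptr0 <- sum(p*t),
    # out_ptr1 <- sum(p*p + t*t) over the 16 spatial elements of each sample
    # (the sibling kernels use offsets 48, 16 and 32 for the other classes).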
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_sum_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_per_fused_add_div_rsub_sum_6(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp12 = tl.load(in_ptr3 + r0, None)
tmp19 = tl.load(in_ptr4 + r0, None)
tmp21 = tl.load(in_ptr5 + r0, None)
tmp28 = tl.load(in_ptr6 + r0, None)
tmp30 = tl.load(in_ptr7 + r0, None)
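    # Combine the four per-class reductions: dice_i = 1 - (num_i + smooth) /
    # (den_i + smooth) with smooth = 1.0, summed over the batch (reduction
    # 'sum'), then averaged over the 4 classes via the 0.25 factor.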
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 / tmp4
tmp6 = tmp1 - tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp1
tmp13 = tmp12 + tmp1
tmp14 = tmp11 / tmp13
tmp15 = tmp1 - tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp19 + tmp1
tmp22 = tmp21 + tmp1
tmp23 = tmp20 / tmp22
tmp24 = tmp1 - tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp29 = tmp28 + tmp1
tmp31 = tmp30 + tmp1
tmp32 = tmp29 / tmp31
tmp33 = tmp1 - tmp32
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.sum(tmp34, 1)[:, None]
tmp37 = 0.0
tmp38 = tmp9 + tmp37
tmp39 = tmp38 + tmp18
tmp40 = tmp39 + tmp27
tmp41 = tmp40 + tmp36
tmp42 = 0.25
tmp43 = tmp41 * tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
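        # Each (num, den) buffer pair below holds the per-sample reductions
        # for one class slice, produced by kernels 2-5.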
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_mul_pow_sum_2[grid(4)](buf1, arg1_1, buf2,
buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4,), (1,), torch.float32)
buf12 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_mul_pow_sum_3[grid(4)](buf1, arg1_1, buf11,
buf12, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_mul_pow_sum_4[grid(4)](buf1, arg1_1, buf5,
buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_add_mul_pow_sum_5[grid(4)](buf1, arg1_1, buf8,
buf9, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf1
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
triton_per_fused_add_div_rsub_sum_6[grid(1)](buf14, buf2, buf3,
buf5, buf6, buf8, buf9, buf11, buf12, 1, 4, XBLOCK=1, num_warps
=2, num_stages=1)
del buf11
del buf12
del buf2
del buf3
del buf5
del buf6
del buf8
del buf9
return buf14,
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
predict: A tensor of shape [N, *]
        target: A tensor of the same shape as predict
reduction: Reduction method to apply, return mean over batch if 'mean',
return sum if 'sum', return a tensor of shape [N,] if 'none'
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2, reduction='sum'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], (
            "predict & target batch size don't match")
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1
) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
class DiceLossNew(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A tensor of shape [N, C, *]
        target: A tensor of the same shape as predict
other args pass to BinaryDiceLoss
Return:
same as BinaryDiceLoss
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(DiceLossNew, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Ignas-S/retinanet-simple | DiceLoss | false | 2,374 | [
"Apache-2.0"
] | 0 | 81b17f65fa5278e6b9a4918e6a20b77949a7e87d | https://github.com/Ignas-S/retinanet-simple/tree/81b17f65fa5278e6b9a4918e6a20b77949a7e87d | import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
predict: A tensor of shape [N, *]
        target: A tensor of the same shape as predict
reduction: Reduction method to apply, return mean over batch if 'mean',
return sum if 'sum', return a tensor of shape [N,] if 'none'
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2, reduction='sum'):
super().__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], (
            "predict & target batch size don't match")
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1
) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
class Model(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A tensor of shape [N, C, *]
        target: A tensor of the same shape as predict
other args pass to BinaryDiceLoss
Return:
same as BinaryDiceLoss
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super().__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert predict.shape == target.shape, 'predict & target shape do not match'
dice = BinaryDiceLoss(**self.kwargs)
total_loss = 0
predict = F.softmax(predict, dim=1)
for i in range(target.shape[1]):
if i != self.ignore_index:
dice_loss = dice(predict[:, i], target[:, i])
if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], (
                        'Expect weight shape [{}], got [{}]'.format(
                        target.shape[1], self.weight.shape[0]))
                    dice_loss *= self.weight[i]
total_loss += dice_loss
return total_loss / target.shape[1]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AttnScore | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class AttnScore(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(AttnScore, self).__init__()
self.activation = activation
self.input_size = input_size
self.method = method
        if method == 'general':
            self.linear = nn.Linear(input_size, input_size)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
        elif method == 'concat':
            self.linear_1 = nn.Linear(input_size * 2, input_size)
            self.linear_2 = nn.Linear(input_size, 1)
            init.uniform_(self.linear_1.weight.data, -0.005, 0.005)
            init.uniform_(self.linear_2.weight.data, -0.005, 0.005)
        elif method == 'tri_concat':
            self.linear = nn.Linear(input_size * 3, 1)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
def forward(self, h1, h2, h1_lens=None, h2_lens=None, normalize=True):
"""
:param h1: b x m x d
:param h2: b x n x d
        :return: attn_weights: b x n x m
"""
_bsize, seq_l1, _dim = h1.size()
_bsize, seq_l2, _dim = h2.size()
assert h1.size(-1) == self.input_size
assert h2.size(-1) == self.input_size
if self.method == 'dot':
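            # dot: align[b, i, j] = <h2[b, i], h1[b, j]>, shape b x n x m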
align = h2.bmm(h1.transpose(1, 2))
elif self.method == 'general':
align = h2.bmm(self.linear(h1).transpose(1, 2))
elif self.method == 'concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear_2(self.activation(self.linear_1(torch.cat([
h1, h2], dim=3)))).squeeze(-1)
align = F.softmax(align, dim=2)
elif self.method == 'tri_concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
            align = self.linear(torch.cat([h1, h2, h1 * h2], dim=3)).squeeze(-1)
        if h1_lens is not None:
            mask = sequence_mask(h1_lens, max_len=seq_l1).unsqueeze(1)
            align.data.masked_fill_(~mask, -100000000.0)
        attn_weights = F.softmax(align, dim=2)
return attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
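        # buf0 holds the raw b x n x m attention scores h2 @ h1^T ('dot' method).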
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf2,
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class AttnScoreNew(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(AttnScoreNew, self).__init__()
self.activation = activation
self.input_size = input_size
self.method = method
        if method == 'general':
            self.linear = nn.Linear(input_size, input_size)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
        elif method == 'concat':
            self.linear_1 = nn.Linear(input_size * 2, input_size)
            self.linear_2 = nn.Linear(input_size, 1)
            init.uniform_(self.linear_1.weight.data, -0.005, 0.005)
            init.uniform_(self.linear_2.weight.data, -0.005, 0.005)
        elif method == 'tri_concat':
            self.linear = nn.Linear(input_size * 3, 1)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| IndexFziQ/ASER | AttnScore | false | 2,375 | [
"MIT"
] | 0 | 67dd1a2a25cec175c15675cc1f8a63ca065b447e | https://github.com/IndexFziQ/ASER/tree/67dd1a2a25cec175c15675cc1f8a63ca065b447e | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Model(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super().__init__()
self.activation = activation
self.input_size = input_size
self.method = method
        if method == 'general':
            self.linear = nn.Linear(input_size, input_size)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
        elif method == 'concat':
            self.linear_1 = nn.Linear(input_size * 2, input_size)
            self.linear_2 = nn.Linear(input_size, 1)
            init.uniform_(self.linear_1.weight.data, -0.005, 0.005)
            init.uniform_(self.linear_2.weight.data, -0.005, 0.005)
        elif method == 'tri_concat':
            self.linear = nn.Linear(input_size * 3, 1)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
def forward(self, h1, h2, h1_lens=None, h2_lens=None, normalize=True):
"""
:param h1: b x m x d
:param h2: b x n x d
        :return: attn_weights: b x n x m
"""
_bsize, seq_l1, _dim = h1.size()
_bsize, seq_l2, _dim = h2.size()
assert h1.size(-1) == self.input_size
assert h2.size(-1) == self.input_size
if self.method == 'dot':
align = h2.bmm(h1.transpose(1, 2))
elif self.method == 'general':
align = h2.bmm(self.linear(h1).transpose(1, 2))
elif self.method == 'concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear_2(self.activation(self.linear_1(torch.cat([
h1, h2], dim=3)))).squeeze(-1)
align = F.softmax(align, dim=2)
elif self.method == 'tri_concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
            align = self.linear(torch.cat([h1, h2, h1 * h2], dim=3)).squeeze(-1)
        if h1_lens is not None:
            mask = sequence_mask(h1_lens, max_len=seq_l1).unsqueeze(1)
            align.data.masked_fill_(~mask, -100000000.0)
        attn_weights = F.softmax(align, dim=2)
return attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
CNN64x3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oo/coou5wvggqufudaduklp7zml27ok3omo7yvnss7pdphcm5ivci7x.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output => convolution
# output_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 4
x0 = xindex % 3844
x4 = (xindex // 3844)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vd/cvdw7jx6mgetumoskb4iy5mzewggo3cgb7y56aps62yvsbhhwy4w.py
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# output_2 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%relu, [5, 5], [3, 3]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = (xindex // 20) % 20
x2 = (xindex // 400)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (62 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (63 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (64 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (65 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (66 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (124 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (125 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (126 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (127 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (128 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (186 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (187 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (188 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (189 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (190 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (248 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (249 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (250 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (251 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (252 + (3*x0) + (186*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (x3), tmp50, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 4, 62, 62), (15488, 3872, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 61504, grid=grid(61504), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf1, buf2, 6400, grid=grid(6400), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CNN64x3(nn.Module):
def __init__(self, input_channels, output_channels):
super(CNN64x3, self).__init__()
self.conv = nn.Conv2d(in_channels=input_channels, kernel_size=3,
out_channels=output_channels)
self.relu = nn.ReLU()
self.pool = nn.AvgPool2d(5, stride=3, padding=0)
def forward(self, batch_data):
output = self.conv(batch_data)
output = self.relu(output)
output = self.pool(output)
return output
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 4
x0 = xindex % 3844
x4 = xindex // 3844
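    # Fused bias-add + ReLU on the extern conv output; results are stored with
    # a padded per-channel stride of 3872 (62*62 = 3844 rounded up) so the
    # pooling kernel reads aligned slabs.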
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20 % 20
x2 = xindex // 400
x3 = xindex
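    # 5x5, stride-3 average pooling over the 62x62 map: 25 explicit loads per
    # output element (row stride 62), summed then scaled by 1/25 (0.04).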
tmp0 = tl.load(in_ptr0 + (3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (62 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (63 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (64 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (65 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (66 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (124 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (125 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (126 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (127 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (128 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (186 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (187 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (188 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (189 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (190 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (248 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (249 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (250 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (251 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (252 + 3 * x0 + 186 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
tmp49 = 0.04
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + x3, tmp50, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 4, 62, 62), (15488, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(61504)](buf0, primals_2,
buf1, 61504, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch
.float32)
triton_poi_fused_avg_pool2d_1[grid(6400)](buf1, buf2, 6400, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
class CNN64x3New(nn.Module):
def __init__(self, input_channels, output_channels):
super(CNN64x3New, self).__init__()
self.conv = nn.Conv2d(in_channels=input_channels, kernel_size=3,
out_channels=output_channels)
self.relu = nn.ReLU()
self.pool = nn.AvgPool2d(5, stride=3, padding=0)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| InExp123/pytorch-self_driving_car | CNN64x3 | false | 2,376 | [
"MIT"
] | 0 | b4e8c8a76079085bf0471dad1820ee9995cffc74 | https://github.com/InExp123/pytorch-self_driving_car/tree/b4e8c8a76079085bf0471dad1820ee9995cffc74 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channels, output_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels=input_channels, kernel_size=3,
out_channels=output_channels)
self.relu = nn.ReLU()
self.pool = nn.AvgPool2d(5, stride=3, padding=0)
def forward(self, batch_data):
output = self.conv(batch_data)
output = self.relu(output)
output = self.pool(output)
return output
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4, 4]
|
ATOCAttentionUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
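    # Fused bias-add + ReLU; also records the `<= 0` mask consumed by
    # autograd's threshold_backward.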
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/us/cus2ucnl3j6ywy7aqunekso7cae73eeifsluocm5svdq3g73qfer.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
# Source node to ATen node mapping:
# x_5 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {})
triton_poi_fused_sigmoid_sigmoid_backward_1 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
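    # Fused scalar-bias-add + sigmoid; also emits sig * (1 - sig) for the
    # backward pass (sigmoid_backward).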
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf8, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
triton_poi_fused_sigmoid_sigmoid_backward_1.run(buf5, primals_7, buf6, 64, grid=grid(64), stream=stream0)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Union
import torch.nn as nn
from typing import Dict
import torch.utils.data
class ATOCAttentionUnit(nn.Module):
"""
Overview:
the attention unit of the ATOC network, implemented as a two-layer MLP, the same as in the original paper
Interface:
__init__, forward
.. note::
"ATOC paper: We use two-layer MLP to implement the attention unit but it is also can be realized by RNN."
"""
def __init__(self, thought_size: 'int', embedding_size: 'int') ->None:
"""
Overview:
init the attention unit according to the sizes of the input arguments
Arguments:
- thought_size (:obj:`int`): the size of input thought
- embedding_size (:obj:`int`): the size of hidden layers
"""
super(ATOCAttentionUnit, self).__init__()
self._thought_size = thought_size
self._hidden_size = embedding_size
self._output_size = 1
self._act1 = nn.ReLU()
self._fc1 = nn.Linear(self._thought_size, self._hidden_size, bias=True)
self._fc2 = nn.Linear(self._hidden_size, self._hidden_size, bias=True)
self._fc3 = nn.Linear(self._hidden_size, self._output_size, bias=True)
self._act2 = nn.Sigmoid()
def forward(self, data: 'Union[Dict, torch.Tensor]') ->torch.Tensor:
"""
Overview:
forward method takes the thoughts of agents as input and outputs the probability of these agents\\
being initiators
Arguments:
- x (:obj:`Union[Dict, torch.Tensor]`): the input tensor, or a dict containing the thoughts tensor
Returns:
- ret (:obj:`torch.Tensor`): the output initiator probability
"""
x = data
if isinstance(data, Dict):
x = data['thought']
x = self._fc1(x)
x = self._act1(x)
x = self._fc2(x)
x = self._act1(x)
x = self._fc3(x)
x = self._act2(x)
return x.squeeze(-1)
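# A minimal usage sketch (illustrative only; the sizes are taken from
# get_inputs()/get_init_inputs() below rather than being prescribed):
#
#     unit = ATOCAttentionUnit(thought_size=4, embedding_size=4)
#     probs = unit(torch.rand(4, 4, 4, 4))  # -> shape (4, 4, 4), values in (0, 1)
#
# A dict input also works: unit({'thought': torch.rand(4, 4, 4, 4)}).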
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'thought_size': 4, 'embedding_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
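# This kernel fuses the linear bias add with ReLU and additionally stores the
# boolean mask (activation <= 0) that aten.threshold_backward consumes when
# propagating gradients through the ReLU.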
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_1[grid(64)](buf5,
primals_7, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class ATOCAttentionUnitNew(nn.Module):
"""
Overview:
the attention unit of the ATOC network, implemented as a two-layer MLP, the same as in the original paper
Interface:
__init__, forward
.. note::
"ATOC paper: We use two-layer MLP to implement the attention unit but it is also can be realized by RNN."
"""
def __init__(self, thought_size: 'int', embedding_size: 'int') ->None:
"""
Overview:
init the attention unit according to the sizes of the input arguments
Arguments:
- thought_size (:obj:`int`): the size of input thought
- embedding_size (:obj:`int`): the size of hidden layers
"""
super(ATOCAttentionUnitNew, self).__init__()
self._thought_size = thought_size
self._hidden_size = embedding_size
self._output_size = 1
self._act1 = nn.ReLU()
self._fc1 = nn.Linear(self._thought_size, self._hidden_size, bias=True)
self._fc2 = nn.Linear(self._hidden_size, self._hidden_size, bias=True)
self._fc3 = nn.Linear(self._hidden_size, self._output_size, bias=True)
self._act2 = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self._fc1.weight
primals_3 = self._fc1.bias
primals_4 = self._fc2.weight
primals_5 = self._fc2.bias
primals_6 = self._fc3.weight
primals_7 = self._fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
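# A hedged sanity check (illustrative assumption, not part of the original file):
# with an identical state_dict, this compiled module should agree with the eager
# ATOCAttentionUnit defined earlier, e.g.
#
#     compiled.load_state_dict(eager.state_dict())
#     assert torch.allclose(eager(x), compiled(x), atol=1e-6)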
| Hcnaeg/DI-engine | ATOCAttentionUnit | false | 2,377 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
from typing import Union
import torch.nn as nn
from typing import Dict
import torch.utils.data
class Model(nn.Module):
"""
Overview:
the attention unit of the ATOC network, implemented as a two-layer MLP, the same as in the original paper
Interface:
__init__, forward
.. note::
"ATOC paper: We use two-layer MLP to implement the attention unit but it is also can be realized by RNN."
"""
def __init__(self, thought_size: 'int', embedding_size: 'int') ->None:
"""
Overview:
init the attention unit according to the sizes of the input arguments
Arguments:
- thought_size (:obj:`int`): the size of input thought
- embedding_size (:obj:`int`): the size of hidden layers
"""
super().__init__()
self._thought_size = thought_size
self._hidden_size = embedding_size
self._output_size = 1
self._act1 = nn.ReLU()
self._fc1 = nn.Linear(self._thought_size, self._hidden_size, bias=True)
self._fc2 = nn.Linear(self._hidden_size, self._hidden_size, bias=True)
self._fc3 = nn.Linear(self._hidden_size, self._output_size, bias=True)
self._act2 = nn.Sigmoid()
def forward(self, data: 'Union[Dict, torch.Tensor]') ->torch.Tensor:
"""
Overview:
forward method takes the thoughts of agents as input and outputs the probability of these agents\\
being initiators
Arguments:
- x (:obj:`Union[Dict, torch.Tensor]`): the input tensor, or a dict containing the thoughts tensor
Returns:
- ret (:obj:`torch.Tensor`): the output initiator probability
"""
x = data
if isinstance(data, Dict):
x = data['thought']
x = self._fc1(x)
x = self._act1(x)
x = self._fc2(x)
x = self._act1(x)
x = self._fc3(x)
x = self._act2(x)
return x.squeeze(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/df/cdfcie57v6pcdd6oeaz4mvlgksxgyuxzmlv5bklwemyulqhtcxta.py
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
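# The kernel above evaluates the whole LayerNorm expression for a last dimension
# of size 4 in a single pass: mean = sum / 4, variance with Bessel's correction
# (division by 3.0, i.e. n - 1, matching torch.std's default correction=1), then
# a_2 * (x - mean) / (std + 1e-06) + b_2.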
# kernel path: runs/run_shard_7/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rg/crgv7fpu5yp7bran3e6f2xvynemba7inixjeyvqbl75yk7rlqbnp.py
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_2 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
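# This kernel folds the second linear layer's bias into the residual connection:
# out = (matmul_result + bias) + x, written back in place over the matmul output.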
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf4, primals_7, primals_1, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
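# In formula form: y = a_2 * (x - mean(x)) / (std(x) + eps) + b_2, with mean and
# std taken over the last dimension (std uses the default unbiased estimator).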
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of the input to the first layer of the FFN.
d_ff (int): the hidden layer size of the second layer
of the FFN.
dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""
Layer definition.
Args:
input: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
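# A minimal usage sketch (the sizes are illustrative, taken from
# get_inputs()/get_init_inputs() below):
#
#     ffn = PositionwiseFeedForward(d_model=4, d_ff=4, dropout=0.1)
#     y = ffn(torch.rand(4, 4, 4, 4))  # same shape as the input
#
# Note the pre-norm residual pattern:
# x + dropout(W2(dropout(ReLU(W1(LayerNorm(x)))))).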
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_7, primals_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), primals_6, buf5, primals_4
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of the input to the first layer of the FFN.
d_ff (int): the hidden layer size of the second layer
of the FFN.
dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_4 = self.w_1.weight
primals_2 = self.w_1.bias
primals_6 = self.w_2.weight
primals_3 = self.w_2.bias
primals_5 = self.layer_norm.a_2
primals_7 = self.layer_norm.b_2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| IndexFziQ/ASER | PositionwiseFeedForward | false | 2,378 | [
"MIT"
] | 0 | 67dd1a2a25cec175c15675cc1f8a63ca065b447e | https://github.com/IndexFziQ/ASER/tree/67dd1a2a25cec175c15675cc1f8a63ca065b447e | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class Model(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of the input to the first layer of the FFN.
d_ff (int): the hidden layer size of the second layer
of the FFN.
dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""
Layer definition.
Args:
input: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LinearFBSP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2p/c2pchd2kzossvpziwqzrjzzaysevgxgdpgzifsch2c4bim4lzcra.py
# Topologically Sorted Source Nodes: [linspace], Original ATen: [aten.linspace]
# Source node to ATen node mapping:
# linspace => iota, lt
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%iota, 2.0), kwargs = {})
triton_poi_fused_linspace_0 = async_compile.triton('triton_poi_fused_linspace_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linspace_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_linspace_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 2.0
tmp3 = tmp1 < tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
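# This tiny kernel materializes the boolean mask iota(4) < 2.0 that downstream
# kernels use to evaluate torch.linspace(-1, 1, 4) symmetrically: the left half
# is computed from the start point and the right half from the end point.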
# kernel path: runs/run_shard_7/inductor_cache/3p/c3pvbauci65lr34aea5n2qcmy3ev76kgerp6hibnhxqedgmmgn32.py
# Topologically Sorted Source Nodes: [kernel], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# kernel => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%cos, %neg], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = (xindex // 8)
x1 = (xindex // 2) % 4
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = x1
tmp7 = tmp6.to(tl.float32)
tmp8 = 2.0
tmp9 = tmp7 < tmp8
tmp10 = 0.6666666666666666
tmp11 = tmp7 * tmp10
tmp12 = -1.0
tmp13 = tmp11 + tmp12
tmp14 = 3 + ((-1)*x1)
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 * tmp10
tmp17 = 1.0
tmp18 = tmp17 - tmp16
tmp19 = tl.where(tmp9, tmp13, tmp18)
tmp20 = 3.141592653589793
tmp21 = tmp19 * tmp20
tmp22 = 1e-08
tmp23 = tmp21 + tmp22
tmp24 = tmp5 * tmp23
tmp25 = tl_math.cos(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp4, tmp25, tmp26)
tmp28 = tmp0 >= tmp3
tmp29 = tl.full([1], 2, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr0 + (x2), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp23
tmp33 = tl_math.sin(tmp32)
tmp34 = -tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp4, tmp27, tmp36)
tl.store(out_ptr0 + (x3), tmp37, xmask)
''', device_str='cuda')
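# The kernel above builds, for each weight/frequency pair, the interleaved pair
# [cos(w * t), -sin(w * t)], i.e. the real and imaginary parts of exp(-i * w * t)
# over the frequency grid t = pi * linspace(-1, 1, 4) (offset by a 1e-08 epsilon).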
# kernel path: runs/run_shard_7/inductor_cache/gp/cgpwn725ul52vyy53yiuwgvtwigdrb5aa4kddyhwc7dgiyd33pxo.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%where_1, %full_default_1], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = (xindex // 8)
x1 = (xindex // 2) % 4
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = x1
tmp7 = tmp6.to(tl.float32)
tmp8 = 2.0
tmp9 = tmp7 < tmp8
tmp10 = 0.6666666666666666
tmp11 = tmp7 * tmp10
tmp12 = -1.0
tmp13 = tmp11 + tmp12
tmp14 = 3 + ((-1)*x1)
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 * tmp10
tmp17 = 1.0
tmp18 = tmp17 - tmp16
tmp19 = tl.where(tmp9, tmp13, tmp18)
tmp20 = 3.141592653589793
tmp21 = tmp19 * tmp20
tmp22 = 1e-08
tmp23 = tmp21 + tmp22
tmp24 = tmp5 * tmp23
tmp25 = tl.load(in_ptr1 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp25 + tmp22
tmp27 = tmp24 / tmp26
tmp28 = 0.0
tmp29 = tmp27 == tmp28
tmp30 = tl_math.sin(tmp27)
tmp31 = tmp30 / tmp27
tmp32 = tl.where(tmp29, tmp17, tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp4, tmp32, tmp33)
tmp35 = tmp0 >= tmp3
tmp36 = tl.full([1], 2, tl.int64)
tmp37 = tmp0 < tmp36
tmp38 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp39 = tl.where(tmp35, tmp28, tmp38)
tmp40 = tl.where(tmp4, tmp34, tmp39)
tl.store(out_ptr0 + (x3), tmp40, xmask)
''', device_str='cuda')
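# This kernel evaluates sinc(w * t / s), where s is a learned scalar (presumably
# the FBSP bandwidth parameter fb; the name is inferred, not stated in the code),
# with the removable singularity handled explicitly (the value at 0 is set to 1.0
# instead of computing sin(0) / 0), and pads a zero imaginary component.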
# kernel path: runs/run_shard_7/inductor_cache/ga/cgahmtbihboplqjxssaronlub2urumgsh6gnsokxhaj753dzuwhh.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %full_default_2], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 0.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
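# This kernel pads a real-valued tensor with a zero imaginary channel, producing
# interleaved (re, 0) pairs so the value can participate in complex arithmetic.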
# kernel path: runs/run_shard_7/inductor_cache/ux/cux3edk6fmp7xbq5rqn3xo6rjamwcodnvsqca2zzhh2ilxatwfkg.py
# Topologically Sorted Source Nodes: [stack, win_1], Original ATen: [aten.stack, aten.mul]
# Source node to ATen node mapping:
# stack => cat_3
# win_1 => mul_15
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %unsqueeze_2], -1), kwargs = {})
# %mul_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %cat_3), kwargs = {})
triton_poi_fused_mul_stack_4 = async_compile.triton('triton_poi_fused_mul_stack_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_stack_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = (xindex // 8)
x4 = (xindex // 2)
x5 = xindex
tmp46 = tl.load(in_ptr1 + (2*x4), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr1 + (1 + (2*x4)), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr0 + (2*x2), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (1 + (2*x2)), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2*x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (1 + (2*x4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (2*x4), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = libdevice.atan2(tmp6, tmp7)
tmp9 = tmp5 * tmp8
tmp10 = tl.load(in_ptr0 + (1 + (2*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp7 * tmp7
tmp14 = tmp6 * tmp6
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp16 * tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp12 * tmp18
tmp20 = tmp9 + tmp19
tmp21 = tl_math.cos(tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tmp25 = tl.full([1], 2, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tl.load(in_ptr0 + (2*x2), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + (2*x4)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (2*x4), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = libdevice.atan2(tmp28, tmp29)
tmp31 = tmp27 * tmp30
tmp32 = tl.load(in_ptr0 + (1 + (2*x2)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp32 * tmp11
tmp34 = tmp29 * tmp29
tmp35 = tmp28 * tmp28
tmp36 = tmp34 + tmp35
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tmp37 * tmp37
tmp39 = tl_math.log(tmp38)
tmp40 = tmp33 * tmp39
tmp41 = tmp31 + tmp40
tmp42 = tl_math.sin(tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp24, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp23, tmp44)
tmp47 = tmp46 * tmp46
tmp49 = tmp48 * tmp48
tmp50 = tmp47 + tmp49
tmp51 = libdevice.sqrt(tmp50)
tmp52 = tmp51 * tmp51
tmp54 = tmp53 * tmp11
tmp55 = libdevice.pow(tmp52, tmp54)
tmp57 = -tmp56
tmp58 = libdevice.atan2(tmp48, tmp46)
tmp59 = tmp57 * tmp58
tmp60 = tl_math.exp(tmp59)
tmp61 = tmp55 * tmp60
tmp62 = tmp61 * tmp45
tl.store(out_ptr0 + (x5), tmp45, xmask)
tl.store(out_ptr1 + (x5), tmp62, xmask)
''', device_str='cuda')
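# The kernel above raises a complex number z to a complex power m = a + b*i in
# polar form: |z|^a * exp(-b * arg(z)) * [cos(phi), sin(phi)] with
# phi = a * arg(z) + (b / 2) * log(|z|^2), where arg comes from atan2 and |z|^2
# from re^2 + im^2; the resulting magnitude is then multiplied into the window.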
# kernel path: runs/run_shard_7/inductor_cache/va/cva2d2qcmnc5fzisj6xbaxmgnzymk266anxegk4e7ujrg73xwc4q.py
# Topologically Sorted Source Nodes: [scale, cat_3, weights], Original ATen: [aten.sqrt, aten.cat, aten.mul]
# Source node to ATen node mapping:
# cat_3 => cat_4
# scale => sqrt
# weights => mul_20
# Graph fragment:
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%view_2,), kwargs = {})
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_2, %add_6], -1), kwargs = {})
# %mul_20 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %cat_4), kwargs = {})
triton_poi_fused_cat_mul_sqrt_5 = async_compile.triton('triton_poi_fused_cat_mul_sqrt_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_sqrt_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_mul_sqrt_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x4 = xindex
x3 = (xindex // 8)
tmp27 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (2*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (1 + (2*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (1 + (2*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + (2*x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (1 + (2*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 * tmp18
tmp20 = tl.load(in_ptr0 + (1 + (2*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + (2*x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp28 * tmp26
tl.store(out_ptr0 + (x4), tmp26, xmask)
tl.store(out_ptr1 + (x4), tmp29, xmask)
''', device_str='cuda')
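# This kernel performs a complex multiply in interleaved (re, im) layout,
# (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i, and then scales both
# components by sqrt(scale) from the third input.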
# kernel path: runs/run_shard_7/inductor_cache/74/c74zbfg3tfn4q7o5zem3awoefkezwu3czoteaqazwcuoa4pbpikg.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear => mm
# Graph fragment:
# %mm : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%view_4, %permute), kwargs = {})
triton_poi_fused_mm_6 = async_compile.triton('triton_poi_fused_mm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/f6/cf63icjghppuwwqvlzsnyu7p2bpjtd4a66qc5oh7k2gvlfdd37is.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear_1 => mm_1
# Graph fragment:
# %mm_1 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%view_4, %permute_1), kwargs = {})
triton_poi_fused_mm_7 = async_compile.triton('triton_poi_fused_mm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/av/cavfvqrwpqvbne4yrk2ditloma2qnmkbde46ibblhmvqah57gjha.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.stack, aten.add]
# Source node to ATen node mapping:
# x => cat_5
# x_1 => add_7
# Graph fragment:
# %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_3, %unsqueeze_4], -1), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat_5, %primals_5), kwargs = {})
triton_poi_fused_add_stack_8 = async_compile.triton('triton_poi_fused_add_stack_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_stack_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_stack_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = (xindex // 2)
x4 = xindex % 8
x5 = xindex
tmp11 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + (x5), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.bool)
# Topologically Sorted Source Nodes: [linspace], Original ATen: [aten.linspace]
stream0 = get_raw_stream(0)
triton_poi_fused_linspace_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_4, buf1, 32, grid=grid(32), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_3, primals_2, buf2, 32, grid=grid(32), stream=stream0)
buf3 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_2, buf3, 8, grid=grid(8), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack, win_1], Original ATen: [aten.stack, aten.mul]
triton_poi_fused_mul_stack_4.run(buf3, buf2, buf4, buf5, 32, grid=grid(32), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale, cat_3, weights], Original ATen: [aten.sqrt, aten.cat, aten.mul]
triton_poi_fused_cat_mul_sqrt_5.run(buf5, buf1, primals_3, buf6, buf7, 32, grid=grid(32), stream=stream0)
del buf5
buf8 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
triton_poi_fused_mm_6.run(buf7, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf8, out=buf9)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
triton_poi_fused_mm_7.run(buf7, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf10, out=buf11)
del buf10
buf12 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.stack, aten.add]
triton_poi_fused_add_stack_8.run(buf9, buf11, primals_5, buf12, 512, grid=grid(512), stream=stream0)
del buf11
del buf9
del primals_5
return (buf12, buf7, primals_2, primals_3, primals_4, buf0, buf1, buf2, buf3, buf4, buf6, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from typing import Tuple
import torch.nn.functional as F
from typing import cast
def scale(old_value, old_min, old_max, new_min, new_max):
old_range = old_max - old_min
new_range = new_max - new_min
new_value = (old_value - old_min) * new_range / old_range + new_min
return new_value
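# Illustrative example: scale(5, 0, 10, 0, 100) == 50.0, i.e. a linear remap of a
# value from the range [old_min, old_max] onto [new_min, new_max].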
class LinearFBSP(torch.nn.Module):
def __init__(self, out_features: 'int', bias: 'bool'=True, normalized:
'bool'=False):
super(LinearFBSP, self).__init__()
self.out_features = out_features
self.normalized = normalized
self.eps = 1e-08
default_dtype = torch.get_default_dtype()
self.register_parameter('m', torch.nn.Parameter(torch.zeros(self.
out_features, dtype=default_dtype)))
self.register_parameter('fb', torch.nn.Parameter(torch.ones(self.
out_features, dtype=default_dtype)))
self.register_parameter('fc', torch.nn.Parameter(torch.arange(self.
out_features, dtype=default_dtype)))
self.register_parameter('bias', torch.nn.Parameter(torch.normal(0.0,
0.5, (self.out_features, 2), dtype=default_dtype) if bias else
cast(torch.nn.Parameter, None)))
self.m.register_hook(lambda grad: grad / (torch.norm(grad, p=float(
'inf')) + self.eps))
self.fb.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
self.fc.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
@staticmethod
def power(x1: 'torch.Tensor', x2: 'torch.Tensor') ->torch.Tensor:
magnitudes = (x1[..., 0] ** 2 + x1[..., 1] ** 2) ** 0.5
phases = x1[..., 1].atan2(x1[..., 0])
power_real = x2[..., 0]
power_imag = x2[..., 1]
mag_out = (magnitudes ** 2) ** (0.5 * power_real) * torch.exp(-
power_imag * phases)
return mag_out.unsqueeze(-1) * torch.stack(((power_real * phases +
0.5 * power_imag * (magnitudes ** 2).log()).cos(), (power_real *
phases + 0.5 * power_imag * (magnitudes ** 2).log()).sin()), dim=-1
)
@staticmethod
def sinc(x: 'torch.Tensor') ->torch.Tensor:
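        # Unnormalized sinc: sin(x) / x, with the removable singularity at x == 0 set to 1.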
return torch.where(cast(torch.Tensor, x == 0), torch.ones_like(x),
torch.sin(x) / x)
def _materialize_weights(self, x: 'torch.Tensor') ->Tuple[torch.Tensor,
bool]:
x_is_complex = x.shape[-1] == 2
in_features = x.shape[-1 - int(x_is_complex)]
t = np.pi * torch.linspace(-1.0, 1.0, in_features, dtype=x.dtype,
device=x.device).reshape(1, -1, 1) + self.eps
m = self.m.reshape(-1, 1, 1)
fb = self.fb.reshape(-1, 1, 1)
fc = self.fc.reshape(-1, 1, 1)
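        # The modulation term exp(-1j * fc * t), stored as stacked (cos, -sin) real/imag parts.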
kernel = torch.cat((torch.cos(fc * t), -torch.sin(fc * t)), dim=-1)
scale = fb.sqrt()
win = self.sinc(fb * t / (m + self.eps))
win = self.power(torch.cat((win, torch.zeros_like(win)), dim=-1),
torch.cat((m, torch.zeros_like(m)), dim=-1))
weights = scale * torch.cat((win[..., :1] * kernel[..., :1] - win[
..., 1:] * kernel[..., 1:], win[..., :1] * kernel[..., 1:] +
win[..., 1:] * kernel[..., :1]), dim=-1)
if self.normalized:
weights = weights / in_features ** 0.5
return weights, x_is_complex
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
weights, x_is_complex = self._materialize_weights(x)
if x_is_complex:
x = torch.stack((F.linear(x[..., 0], weights[..., 0]) - F.
linear(x[..., 1], weights[..., 1]), F.linear(x[..., 0],
weights[..., 1]) + F.linear(x[..., 1], weights[..., 0])),
dim=-1)
else:
x = torch.stack((F.linear(x, weights[..., 0]), F.linear(x,
weights[..., 1])), dim=-1)
if self.bias is not None and self.bias.numel(
) == self.out_features * 2:
x = x + self.bias
return x, weights
def extra_repr(self) ->str:
return 'out_features={}, bias={}, normalized={}'.format(self.
out_features, self.bias is not None and self.bias.numel() ==
self.out_features * 2, self.normalized)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_features': 4}]
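# --- Hedged sanity sketch (illustrative, not part of the dataset row; the helper name
# _check_power is an assumption). LinearFBSP.power implements complex exponentiation on
# stacked (real, imag) pairs, so it should agree with torch's native complex pow for
# bases away from the origin.
def _check_power(n: int = 8) -> None:
    x1 = torch.randn(n, 2) + torch.tensor([3.0, 0.0])  # shift real parts away from zero
    x2 = torch.randn(n, 2)
    out = LinearFBSP.power(x1, x2)
    ref = torch.complex(x1[..., 0], x1[..., 1]) ** torch.complex(x2[..., 0], x2[..., 1])
    assert torch.allclose(out[..., 0], ref.real, atol=1e-4)
    assert torch.allclose(out[..., 1], ref.imag, atol=1e-4)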
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from typing import Tuple
from typing import cast
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_linspace_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 2.0
tmp3 = tmp1 < tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
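# The next kernel materializes kernel = cat((cos(fc * t), -sin(fc * t)), dim=-1)
# from _materialize_weights (labelled [kernel] in the annotated copy above).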
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = xindex // 8
x1 = xindex // 2 % 4
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = x1
tmp7 = tmp6.to(tl.float32)
tmp8 = 2.0
tmp9 = tmp7 < tmp8
tmp10 = 0.6666666666666666
tmp11 = tmp7 * tmp10
tmp12 = -1.0
tmp13 = tmp11 + tmp12
tmp14 = 3 + -1 * x1
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 * tmp10
tmp17 = 1.0
tmp18 = tmp17 - tmp16
tmp19 = tl.where(tmp9, tmp13, tmp18)
tmp20 = 3.141592653589793
tmp21 = tmp19 * tmp20
tmp22 = 1e-08
tmp23 = tmp21 + tmp22
tmp24 = tmp5 * tmp23
tmp25 = tl_math.cos(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp4, tmp25, tmp26)
tmp28 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp31 = tl.load(in_ptr0 + x2, tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp31 * tmp23
tmp33 = tl_math.sin(tmp32)
tmp34 = -tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp4, tmp27, tmp36)
tl.store(out_ptr0 + x3, tmp37, xmask)
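# The next kernel evaluates win = sinc(fb * t / (m + eps)) paired with a zero
# imaginary channel (labelled [cat_1] in the annotated copy above).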
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = xindex // 8
x1 = xindex // 2 % 4
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = x1
tmp7 = tmp6.to(tl.float32)
tmp8 = 2.0
tmp9 = tmp7 < tmp8
tmp10 = 0.6666666666666666
tmp11 = tmp7 * tmp10
tmp12 = -1.0
tmp13 = tmp11 + tmp12
tmp14 = 3 + -1 * x1
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 * tmp10
tmp17 = 1.0
tmp18 = tmp17 - tmp16
tmp19 = tl.where(tmp9, tmp13, tmp18)
tmp20 = 3.141592653589793
tmp21 = tmp19 * tmp20
tmp22 = 1e-08
tmp23 = tmp21 + tmp22
tmp24 = tmp5 * tmp23
tmp25 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp25 + tmp22
tmp27 = tmp24 / tmp26
tmp28 = 0.0
tmp29 = tmp27 == tmp28
tmp30 = tl_math.sin(tmp27)
tmp31 = tmp30 / tmp27
tmp32 = tl.where(tmp29, tmp17, tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp4, tmp32, tmp33)
tmp35 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp38 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp39 = tl.where(tmp35, tmp28, tmp38)
tmp40 = tl.where(tmp4, tmp34, tmp39)
tl.store(out_ptr0 + x3, tmp40, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = 0.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_mul_stack_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x2 = xindex // 8
x4 = xindex // 2
x5 = xindex
tmp46 = tl.load(in_ptr1 + 2 * x4, xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr1 + (1 + 2 * x4), xmask, eviction_policy='evict_last'
)
tmp53 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last'
)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (1 + 2 * x4), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + 2 * x4, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = libdevice.atan2(tmp6, tmp7)
tmp9 = tmp5 * tmp8
tmp10 = tl.load(in_ptr0 + (1 + 2 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp7 * tmp7
tmp14 = tmp6 * tmp6
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp16 * tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp12 * tmp18
tmp20 = tmp9 + tmp19
tmp21 = tl_math.cos(tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp27 = tl.load(in_ptr0 + 2 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + 2 * x4), tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + 2 * x4, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = libdevice.atan2(tmp28, tmp29)
tmp31 = tmp27 * tmp30
tmp32 = tl.load(in_ptr0 + (1 + 2 * x2), tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp32 * tmp11
tmp34 = tmp29 * tmp29
tmp35 = tmp28 * tmp28
tmp36 = tmp34 + tmp35
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tmp37 * tmp37
tmp39 = tl_math.log(tmp38)
tmp40 = tmp33 * tmp39
tmp41 = tmp31 + tmp40
tmp42 = tl_math.sin(tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp24, tmp42, tmp43)
tmp45 = tl.where(tmp4, tmp23, tmp44)
tmp47 = tmp46 * tmp46
tmp49 = tmp48 * tmp48
tmp50 = tmp47 + tmp49
tmp51 = libdevice.sqrt(tmp50)
tmp52 = tmp51 * tmp51
tmp54 = tmp53 * tmp11
tmp55 = libdevice.pow(tmp52, tmp54)
tmp57 = -tmp56
tmp58 = libdevice.atan2(tmp48, tmp46)
tmp59 = tmp57 * tmp58
tmp60 = tl_math.exp(tmp59)
tmp61 = tmp55 * tmp60
tmp62 = tmp61 * tmp45
tl.store(out_ptr0 + x5, tmp45, xmask)
tl.store(out_ptr1 + x5, tmp62, xmask)
@triton.jit
def triton_poi_fused_cat_mul_sqrt_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x4 = xindex
x3 = xindex // 8
tmp27 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + 2 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (1 + 2 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (1 + 2 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr0 + 2 * x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (1 + 2 * x1), tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 * tmp18
tmp20 = tl.load(in_ptr0 + (1 + 2 * x1), tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + 2 * x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp28 * tmp26
tl.store(out_ptr0 + x4, tmp26, xmask)
tl.store(out_ptr1 + x4, tmp29, xmask)
@triton.jit
def triton_poi_fused_mm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_stack_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex % 8
x5 = xindex
tmp11 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + x3, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x5, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.bool)
get_raw_stream(0)
triton_poi_fused_linspace_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](primals_4, buf1, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_2[grid(32)](primals_3, primals_2, buf2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.float32)
triton_poi_fused_cat_3[grid(8)](primals_2, buf3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_mul_stack_4[grid(32)](buf3, buf2, buf4, buf5, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_cat_mul_sqrt_5[grid(32)](buf5, buf1, primals_3,
buf6, buf7, 32, XBLOCK=32, num_warps=1, num_stages=1)
del buf5
buf8 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_mm_6[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
buf8, out=buf9)
buf10 = buf8
del buf8
triton_poi_fused_mm_7[grid(16)](buf7, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
buf10, out=buf11)
del buf10
buf12 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
triton_poi_fused_add_stack_8[grid(512)](buf9, buf11, primals_5,
buf12, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf11
del buf9
del primals_5
return (buf12, buf7, primals_2, primals_3, primals_4, buf0, buf1, buf2,
buf3, buf4, buf6, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0))
def scale(old_value, old_min, old_max, new_min, new_max):
old_range = old_max - old_min
new_range = new_max - new_min
new_value = (old_value - old_min) * new_range / old_range + new_min
return new_value
class LinearFBSPNew(torch.nn.Module):
def __init__(self, out_features: 'int', bias: 'bool'=True, normalized:
'bool'=False):
super(LinearFBSPNew, self).__init__()
self.out_features = out_features
self.normalized = normalized
self.eps = 1e-08
default_dtype = torch.get_default_dtype()
self.register_parameter('m', torch.nn.Parameter(torch.zeros(self.
out_features, dtype=default_dtype)))
self.register_parameter('fb', torch.nn.Parameter(torch.ones(self.
out_features, dtype=default_dtype)))
self.register_parameter('fc', torch.nn.Parameter(torch.arange(self.
out_features, dtype=default_dtype)))
self.register_parameter('bias', torch.nn.Parameter(torch.normal(0.0,
0.5, (self.out_features, 2), dtype=default_dtype) if bias else
cast(torch.nn.Parameter, None)))
self.m.register_hook(lambda grad: grad / (torch.norm(grad, p=float(
'inf')) + self.eps))
self.fb.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
self.fc.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
@staticmethod
def power(x1: 'torch.Tensor', x2: 'torch.Tensor') ->torch.Tensor:
magnitudes = (x1[..., 0] ** 2 + x1[..., 1] ** 2) ** 0.5
phases = x1[..., 1].atan2(x1[..., 0])
power_real = x2[..., 0]
power_imag = x2[..., 1]
mag_out = (magnitudes ** 2) ** (0.5 * power_real) * torch.exp(-
power_imag * phases)
return mag_out.unsqueeze(-1) * torch.stack(((power_real * phases +
0.5 * power_imag * (magnitudes ** 2).log()).cos(), (power_real *
phases + 0.5 * power_imag * (magnitudes ** 2).log()).sin()), dim=-1
)
@staticmethod
def sinc(x: 'torch.Tensor') ->torch.Tensor:
return torch.where(cast(torch.Tensor, x == 0), torch.ones_like(x),
torch.sin(x) / x)
def _materialize_weights(self, x: 'torch.Tensor') ->Tuple[torch.Tensor,
bool]:
x_is_complex = x.shape[-1] == 2
in_features = x.shape[-1 - int(x_is_complex)]
t = np.pi * torch.linspace(-1.0, 1.0, in_features, dtype=x.dtype,
device=x.device).reshape(1, -1, 1) + self.eps
m = self.m.reshape(-1, 1, 1)
fb = self.fb.reshape(-1, 1, 1)
fc = self.fc.reshape(-1, 1, 1)
kernel = torch.cat((torch.cos(fc * t), -torch.sin(fc * t)), dim=-1)
scale = fb.sqrt()
win = self.sinc(fb * t / (m + self.eps))
win = self.power(torch.cat((win, torch.zeros_like(win)), dim=-1),
torch.cat((m, torch.zeros_like(m)), dim=-1))
weights = scale * torch.cat((win[..., :1] * kernel[..., :1] - win[
..., 1:] * kernel[..., 1:], win[..., :1] * kernel[..., 1:] +
win[..., 1:] * kernel[..., :1]), dim=-1)
if self.normalized:
weights = weights / in_features ** 0.5
return weights, x_is_complex
def extra_repr(self) ->str:
return 'out_features={}, bias={}, normalized={}'.format(self.
out_features, self.bias is not None and self.bias.numel() ==
self.out_features * 2, self.normalized)
def forward(self, input_0):
primals_2 = self.m
primals_3 = self.fb
primals_4 = self.fc
primals_5 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| Gikiman/executors | LinearFBSP | false | 2,379 | [
"Apache-2.0"
] | 0 | 98658b4136859164390cfccbde8cf0f7cf843593 | https://github.com/Gikiman/executors/tree/98658b4136859164390cfccbde8cf0f7cf843593 | import torch
import numpy as np
from typing import Tuple
import torch.nn.functional as F
from typing import cast
def scale(old_value, old_min, old_max, new_min, new_max):
old_range = old_max - old_min
new_range = new_max - new_min
new_value = (old_value - old_min) * new_range / old_range + new_min
return new_value
class Model(torch.nn.Module):
def __init__(self, out_features: 'int', bias: 'bool'=True, normalized:
'bool'=False):
super().__init__()
self.out_features = out_features
self.normalized = normalized
self.eps = 1e-08
default_dtype = torch.get_default_dtype()
self.register_parameter('m', torch.nn.Parameter(torch.zeros(self.
out_features, dtype=default_dtype)))
self.register_parameter('fb', torch.nn.Parameter(torch.ones(self.
out_features, dtype=default_dtype)))
self.register_parameter('fc', torch.nn.Parameter(torch.arange(self.
out_features, dtype=default_dtype)))
self.register_parameter('bias', torch.nn.Parameter(torch.normal(0.0,
0.5, (self.out_features, 2), dtype=default_dtype) if bias else
cast(torch.nn.Parameter, None)))
self.m.register_hook(lambda grad: grad / (torch.norm(grad, p=float(
'inf')) + self.eps))
self.fb.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
self.fc.register_hook(lambda grad: grad / (torch.norm(grad, p=float
('inf')) + self.eps))
@staticmethod
def power(x1: 'torch.Tensor', x2: 'torch.Tensor') ->torch.Tensor:
magnitudes = (x1[..., 0] ** 2 + x1[..., 1] ** 2) ** 0.5
phases = x1[..., 1].atan2(x1[..., 0])
power_real = x2[..., 0]
power_imag = x2[..., 1]
mag_out = (magnitudes ** 2) ** (0.5 * power_real) * torch.exp(-
power_imag * phases)
return mag_out.unsqueeze(-1) * torch.stack(((power_real * phases +
0.5 * power_imag * (magnitudes ** 2).log()).cos(), (power_real *
phases + 0.5 * power_imag * (magnitudes ** 2).log()).sin()), dim=-1
)
@staticmethod
def sinc(x: 'torch.Tensor') ->torch.Tensor:
return torch.where(cast(torch.Tensor, x == 0), torch.ones_like(x),
torch.sin(x) / x)
def _materialize_weights(self, x: 'torch.Tensor') ->Tuple[torch.Tensor,
bool]:
x_is_complex = x.shape[-1] == 2
in_features = x.shape[-1 - int(x_is_complex)]
t = np.pi * torch.linspace(-1.0, 1.0, in_features, dtype=x.dtype,
device=x.device).reshape(1, -1, 1) + self.eps
m = self.m.reshape(-1, 1, 1)
fb = self.fb.reshape(-1, 1, 1)
fc = self.fc.reshape(-1, 1, 1)
kernel = torch.cat((torch.cos(fc * t), -torch.sin(fc * t)), dim=-1)
scale = fb.sqrt()
win = self.sinc(fb * t / (m + self.eps))
win = self.power(torch.cat((win, torch.zeros_like(win)), dim=-1),
torch.cat((m, torch.zeros_like(m)), dim=-1))
weights = scale * torch.cat((win[..., :1] * kernel[..., :1] - win[
..., 1:] * kernel[..., 1:], win[..., :1] * kernel[..., 1:] +
win[..., 1:] * kernel[..., :1]), dim=-1)
if self.normalized:
weights = weights / in_features ** 0.5
return weights, x_is_complex
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
weights, x_is_complex = self._materialize_weights(x)
if x_is_complex:
x = torch.stack((F.linear(x[..., 0], weights[..., 0]) - F.
linear(x[..., 1], weights[..., 1]), F.linear(x[..., 0],
weights[..., 1]) + F.linear(x[..., 1], weights[..., 0])),
dim=-1)
else:
x = torch.stack((F.linear(x, weights[..., 0]), F.linear(x,
weights[..., 1])), dim=-1)
if self.bias is not None and self.bias.numel(
) == self.out_features * 2:
# ... truncated (>4000 chars) for memory efficiency |
GLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6v/c6vzcw3gyn5uqhyxbbwmpum2zzhvhs66tjq2oznzcap5zo7izpvb.py
# Topologically Sorted Source Nodes: [gate_1, x], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# gate_1 => sigmoid
# x => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate_1, x], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class GLU(nn.Module):
"""
Overview:
Gating Linear Unit.
        This class implements the following computation:
.. code:: python
# Inputs: input, context, output_size
            # The gate value is a learnt function of the context.
gate = sigmoid(linear(input.size)(context))
# Gate the input and return an output of desired size.
gated_input = gate * input
output = linear(output_size)(gated_input)
return output
Interfaces:
forward
.. tip::
        This module also supports 2D convolution, in which case the input and context must have the same shape.
"""
def __init__(self, input_dim: 'int', output_dim: 'int', context_dim:
'int', input_type: 'str'='fc') ->None:
"""
Overview:
Init GLU
Arguments:
- input_dim (:obj:`int`): the input dimension
- output_dim (:obj:`int`): the output dimension
- context_dim (:obj:`int`): the context dimension
- input_type (:obj:`str`): the type of input, now support ['fc', 'conv2d']
"""
super(GLU, self).__init__()
assert input_type in ['fc', 'conv2d']
if input_type == 'fc':
self.layer1 = nn.Linear(context_dim, input_dim)
self.layer2 = nn.Linear(input_dim, output_dim)
elif input_type == 'conv2d':
self.layer1 = nn.Conv2d(context_dim, input_dim, 1, 1, 0)
self.layer2 = nn.Conv2d(input_dim, output_dim, 1, 1, 0)
def forward(self, x: 'torch.Tensor', context: 'torch.Tensor'
) ->torch.Tensor:
"""
Overview:
            Return the GLU-computed tensor
Arguments:
- x (:obj:`torch.Tensor`) : the input tensor
- context (:obj:`torch.Tensor`) : the context tensor
Returns:
- x (:obj:`torch.Tensor`): the computed tensor
"""
gate = self.layer1(context)
gate = torch.sigmoid(gate)
x = gate * x
x = self.layer2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'context_dim': 4}]
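# --- Hedged usage sketch (illustrative, not part of the dataset row; _demo_glu is an
# assumed helper name). Exercises the 'fc' variant with the shapes produced by
# get_inputs()/get_init_inputs(): gate = sigmoid(layer1(context)); out = layer2(gate * x).
def _demo_glu() -> None:
    glu = GLU(input_dim=4, output_dim=4, context_dim=4)
    x, context = get_inputs()
    out = glu(x, context)
    assert out.shape == (4, 4, 4, 4)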
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
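# Fused elementwise kernel for [gate_1, x]: out = sigmoid(in0) * in1 over the
# flattened 256-element tensors; the two Linear layers run through extern_kernels.addmm.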
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_4, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_6
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5
class GLUNew(nn.Module):
"""
Overview:
Gating Linear Unit.
        This class implements the following computation:
.. code:: python
# Inputs: input, context, output_size
            # The gate value is a learnt function of the context.
gate = sigmoid(linear(input.size)(context))
# Gate the input and return an output of desired size.
gated_input = gate * input
output = linear(output_size)(gated_input)
return output
Interfaces:
forward
.. tip::
        This module also supports 2D convolution, in which case the input and context must have the same shape.
"""
def __init__(self, input_dim: 'int', output_dim: 'int', context_dim:
'int', input_type: 'str'='fc') ->None:
"""
Overview:
Init GLU
Arguments:
- input_dim (:obj:`int`): the input dimension
- output_dim (:obj:`int`): the output dimension
- context_dim (:obj:`int`): the context dimension
- input_type (:obj:`str`): the type of input, now support ['fc', 'conv2d']
"""
super(GLUNew, self).__init__()
assert input_type in ['fc', 'conv2d']
if input_type == 'fc':
self.layer1 = nn.Linear(context_dim, input_dim)
self.layer2 = nn.Linear(input_dim, output_dim)
elif input_type == 'conv2d':
self.layer1 = nn.Conv2d(context_dim, input_dim, 1, 1, 0)
self.layer2 = nn.Conv2d(input_dim, output_dim, 1, 1, 0)
def forward(self, input_0, input_1):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_5 = self.layer2.weight
primals_6 = self.layer2.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
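# --- Hedged equivalence sketch (assumes a CUDA device; not part of the generated
# module, and _check_glu_new is an assumed helper name). Per call() above, the first
# tensor argument feeds layer1 (the gate) and the second is the gated operand, so the
# compiled path should match layer2(sigmoid(layer1(a)) * b).
def _check_glu_new() -> None:
    if not torch.cuda.is_available():
        return
    m = GLUNew(4, 4, 4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.layer2(torch.sigmoid(m.layer1(a)) * b)
    assert torch.allclose(m(a, b), ref, atol=1e-5)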
| Hcnaeg/DI-engine | GLU | false | 2,380 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
Overview:
Gating Linear Unit.
        This class implements the following computation:
.. code:: python
# Inputs: input, context, output_size
            # The gate value is a learnt function of the context.
gate = sigmoid(linear(input.size)(context))
# Gate the input and return an output of desired size.
gated_input = gate * input
output = linear(output_size)(gated_input)
return output
Interfaces:
forward
.. tip::
        This module also supports 2D convolution, in which case the input and context must have the same shape.
"""
def __init__(self, input_dim: 'int', output_dim: 'int', context_dim:
'int', input_type: 'str'='fc') ->None:
"""
Overview:
Init GLU
Arguments:
- input_dim (:obj:`int`): the input dimension
- output_dim (:obj:`int`): the output dimension
- context_dim (:obj:`int`): the context dimension
- input_type (:obj:`str`): the type of input, now support ['fc', 'conv2d']
"""
super().__init__()
assert input_type in ['fc', 'conv2d']
if input_type == 'fc':
self.layer1 = nn.Linear(context_dim, input_dim)
self.layer2 = nn.Linear(input_dim, output_dim)
elif input_type == 'conv2d':
self.layer1 = nn.Conv2d(context_dim, input_dim, 1, 1, 0)
self.layer2 = nn.Conv2d(input_dim, output_dim, 1, 1, 0)
def forward(self, x: 'torch.Tensor', context: 'torch.Tensor'
) ->torch.Tensor:
"""
Overview:
            Return the GLU-computed tensor
Arguments:
- x (:obj:`torch.Tensor`) : the input tensor
- context (:obj:`torch.Tensor`) : the context tensor
Returns:
- x (:obj:`torch.Tensor`): the computed tensor
"""
gate = self.layer1(context)
gate = torch.sigmoid(gate)
x = gate * x
x = self.layer2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [contexts], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg0_1, out=buf3)
del arg0_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
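# Illustrative example: sequence_mask(torch.tensor([1, 3]), max_len=4) returns
# [[True, False, False, False], [True, True, True, False]].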
class AttnScore(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(AttnScore, self).__init__()
self.activation = activation
self.input_size = input_size
self.method = method
if method == 'general':
self.linear = nn.Linear(input_size, input_size)
init.uniform(self.linear.weight.data, -0.005, 0.005)
elif method == 'concat':
self.linear_1 = nn.Linear(input_size * 2, input_size)
self.linear_2 = nn.Linear(input_size, 1)
init.uniform(self.linear_1.weight.data, -0.005, 0.005)
init.uniform(self.linear_2.weight.data, -0.005, 0.005)
elif method == 'tri_concat':
self.linear = nn.Linear(input_size * 3, 1)
init.uniform(self.linear.weight.data, -0.005, 0.005)
def forward(self, h1, h2, h1_lens=None, h2_lens=None, normalize=True):
"""
:param h1: b x m x d
:param h2: b x n x d
        :return: attn_weights: b x n x m
"""
_bsize, seq_l1, _dim = h1.size()
_bsize, seq_l2, _dim = h2.size()
assert h1.size(-1) == self.input_size
assert h2.size(-1) == self.input_size
if self.method == 'dot':
align = h2.bmm(h1.transpose(1, 2))
elif self.method == 'general':
align = h2.bmm(self.linear(h1).transpose(1, 2))
elif self.method == 'concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear_2(self.activation(self.linear_1(torch.cat([
h1, h2], dim=3)))).squeeze(-1)
align = F.softmax(align, dim=2)
elif self.method == 'tri_concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear(torch.cat([h1, h2, h1 * h2], dim=3)).squeeze(-1
)
if h1_lens is not None:
mask = sequence_mask(h1_lens, max_len=seq_l1).unsqueeze(1)
align.data.masked_fill_(1 - mask, -100000000.0)
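        # Note: both branches below apply the same softmax, so `normalize` currently has no effect.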
if normalize:
attn_weights = F.softmax(align, dim=2)
else:
attn_weights = F.softmax(align, dim=2)
return attn_weights
class Attention(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(Attention, self).__init__()
self.attn_score = AttnScore(input_size=input_size, activation=
activation, method=method)
def forward(self, query, keys, q_lens=None, k_lens=None):
"""
:param query: bsize x query_num x input_size
:param keys: bsize x key_num x input_size
:param q_lens: bsize x query_num
:param k_lens: bsize x key_num
        :return: contexts (bsize x query_num x input_size), attn_weights (bsize x query_num x key_num)
"""
attn_weights = self.attn_score(keys, query, k_lens, q_lens)
contexts = attn_weights.bmm(keys)
return contexts, attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
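# --- Hedged usage sketch (illustrative, not part of the dataset row; _demo_attention
# is an assumed helper name). Dot-product attention over the shapes from get_inputs();
# each row of the weights is a softmax over the keys, so it sums to one.
def _demo_attention() -> None:
    query, keys = get_inputs()
    attn = Attention(input_size=4, method='dot')
    contexts, weights = attn(query, keys)
    assert contexts.shape == (4, 4, 4)
    assert torch.allclose(weights.sum(dim=2), torch.ones(4, 4), atol=1e-6)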
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
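# Added note: the two kernels above implement a numerically stable row softmax
# over rows of length 4. The first pass subtracts the row maximum before
# exponentiating (so exp never overflows); the second pass divides each
# exponentiated value by the row sum. Splitting the max-reduction and the
# sum-normalization into two launches mirrors how Inductor decomposes
# aten.softmax for small fixed row sizes.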
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg0_1, out=buf3)
del arg0_1
return buf3, buf2
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
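# Hedged example (added, illustrative only): for lengths [1, 3] and max_len=4,
# sequence_mask yields
#   [[True, False, False, False],
#    [True, True,  True,  False]]
# i.e. position j of a row is kept exactly when j < that row's length.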
class AttnScore(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(AttnScore, self).__init__()
self.activation = activation
self.input_size = input_size
self.method = method
        if method == 'general':
            self.linear = nn.Linear(input_size, input_size)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
        elif method == 'concat':
            self.linear_1 = nn.Linear(input_size * 2, input_size)
            self.linear_2 = nn.Linear(input_size, 1)
            init.uniform_(self.linear_1.weight.data, -0.005, 0.005)
            init.uniform_(self.linear_2.weight.data, -0.005, 0.005)
        elif method == 'tri_concat':
            self.linear = nn.Linear(input_size * 3, 1)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
def forward(self, h1, h2, h1_lens=None, h2_lens=None, normalize=True):
"""
:param h1: b x m x d
:param h2: b x n x d
        :return: attn_weights: b x n x m
"""
_bsize, seq_l1, _dim = h1.size()
_bsize, seq_l2, _dim = h2.size()
assert h1.size(-1) == self.input_size
assert h2.size(-1) == self.input_size
if self.method == 'dot':
align = h2.bmm(h1.transpose(1, 2))
elif self.method == 'general':
align = h2.bmm(self.linear(h1).transpose(1, 2))
elif self.method == 'concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear_2(self.activation(self.linear_1(torch.cat([
h1, h2], dim=3)))).squeeze(-1)
align = F.softmax(align, dim=2)
elif self.method == 'tri_concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
            align = self.linear(torch.cat([h1, h2, h1 * h2], dim=3)).squeeze(-1)
        if h1_lens is not None:
            mask = sequence_mask(h1_lens, max_len=seq_l1).unsqueeze(1)
            align.data.masked_fill_(~mask, -100000000.0)
        # normalize currently has no effect: softmax is applied either way
        attn_weights = F.softmax(align, dim=2)
return attn_weights
class AttentionNew(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super(AttentionNew, self).__init__()
self.attn_score = AttnScore(input_size=input_size, activation=
activation, method=method)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| IndexFziQ/ASER | Attention | false | 2,381 | [
"MIT"
] | 0 | 67dd1a2a25cec175c15675cc1f8a63ca065b447e | https://github.com/IndexFziQ/ASER/tree/67dd1a2a25cec175c15675cc1f8a63ca065b447e | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class AttnScore(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super().__init__()
self.activation = activation
self.input_size = input_size
self.method = method
        if method == 'general':
            self.linear = nn.Linear(input_size, input_size)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
        elif method == 'concat':
            self.linear_1 = nn.Linear(input_size * 2, input_size)
            self.linear_2 = nn.Linear(input_size, 1)
            init.uniform_(self.linear_1.weight.data, -0.005, 0.005)
            init.uniform_(self.linear_2.weight.data, -0.005, 0.005)
        elif method == 'tri_concat':
            self.linear = nn.Linear(input_size * 3, 1)
            init.uniform_(self.linear.weight.data, -0.005, 0.005)
def forward(self, h1, h2, h1_lens=None, h2_lens=None, normalize=True):
"""
:param h1: b x m x d
:param h2: b x n x d
        :return: attn_weights: b x n x m
"""
_bsize, seq_l1, _dim = h1.size()
_bsize, seq_l2, _dim = h2.size()
assert h1.size(-1) == self.input_size
assert h2.size(-1) == self.input_size
if self.method == 'dot':
align = h2.bmm(h1.transpose(1, 2))
elif self.method == 'general':
align = h2.bmm(self.linear(h1).transpose(1, 2))
elif self.method == 'concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
align = self.linear_2(self.activation(self.linear_1(torch.cat([
h1, h2], dim=3)))).squeeze(-1)
align = F.softmax(align, dim=2)
elif self.method == 'tri_concat':
h1 = h1.unsqueeze(1).repeat(1, seq_l2, 1, 1)
h2 = h2.unsqueeze(2).repeat(1, 1, seq_l1, 1)
            align = self.linear(torch.cat([h1, h2, h1 * h2], dim=3)).squeeze(-1)
        if h1_lens is not None:
            mask = sequence_mask(h1_lens, max_len=seq_l1).unsqueeze(1)
            align.data.masked_fill_(~mask, -100000000.0)
        # normalize currently has no effect: softmax is applied either way
        attn_weights = F.softmax(align, dim=2)
return attn_weights
class Model(nn.Module):
def __init__(self, input_size, activation=nn.Tanh(), method='dot'):
super().__init__()
self.attn_score = AttnScore(input_size=input_size, activation=
activation, method=method)
def forward(self, query, keys, q_lens=None, k_lens=None):
"""
:param query: bsize x query_num x input_size
:param keys: bsize x key_num x input_size
        :param q_lens: bsize (query sequence lengths)
        :param k_lens: bsize (key sequence lengths)
        :return: contexts: bsize x query_num x input_size,
            attn_weights: bsize x query_num x key_num
"""
attn_weights = self.attn_score(keys, query, k_lens, q_lens)
contexts = attn_weights.bmm(keys)
return contexts, attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
SpatialGate3D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bb/cbbrf3z5zs3mby26g37h3hqek6jb44pbydizc4mmbq52gwghiytt.py
# Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_compress => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 262144) % 2
x0 = xindex % 262144
x2 = (xindex // 524288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (524288*x2)), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (262144 + x0 + (524288*x2)), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + (x0 + (524288*x2)), tmp10, eviction_policy='evict_last', other=0.0)
tmp14 = tl.load(in_ptr0 + (262144 + x0 + (524288*x2)), tmp10, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = 2.0
tmp17 = tmp15 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp9, tmp19)
tl.store(out_ptr0 + (x3), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vp/cvpig5tiioo5gjhy5u4vxwsuxiinwoihxai3joaon64zjwyec7iz.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_1 => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
triton_poi_fused_repeat_1 = async_compile.triton('triton_poi_fused_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bx/cbxfs6slagmrrwwqr4aswhlzhjxr5ysvyvaom4sgva4am55s3lhy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
triton_red_fused__native_batch_norm_legit_2 = async_compile.triton('triton_red_fused__native_batch_norm_legit_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
tl.store(out_ptr2 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/er/cerl5at3krblbvplgpf7b7cx6mcfh6oicu4azokdq7wwdkmyyk3u.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 32
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (32*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (32*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + (32*x0)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tmp16 = 262144.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kx/ckx42tpuyndb5gvhpvafnh7qyd5sgmhlww4fuulgch45zdvhwh3f.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# scale => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = (xindex // 262144)
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.sigmoid(tmp8)
tl.store(out_ptr0 + (x2), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k4/ck4iij64xszzsv7pymvjkxrqh4nbcwe6zrz72hxmjnd7m73teljj.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_3), kwargs = {})
triton_poi_fused_mul_5 = async_compile.triton('triton_poi_fused_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 262144
x2 = (xindex // 524288)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (262144*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 64, 64, 64), (524288, 262144, 4096, 64, 1))
assert_size_stride(primals_2, (1, 2, 3, 3, 3), (54, 27, 9, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 64, 64, 64), (524288, 262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1))
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat]
triton_poi_fused_repeat_1.run(primals_3, buf2, 4, grid=grid(4), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat]
triton_poi_fused_repeat_1.run(primals_4, buf3, 4, grid=grid(4), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128, 128, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128, 128, 1), torch.float32)
buf6 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_2.run(buf1, buf4, buf5, buf6, 128, 8192, grid=grid(128), stream=stream0)
buf7 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 1, 1, 1), torch.float32)
buf8 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.float32)
buf10 = reinterpret_tensor(buf8, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_3.run(buf10, buf4, buf5, buf6, buf7, 4, 32, grid=grid(4), stream=stream0)
del buf4
del buf5
del buf6
buf11 = empty_strided_cuda((4, 1, 64, 64, 64), (262144, 1048576, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf1, buf7, buf10, buf2, buf3, buf11, 1048576, grid=grid(1048576), stream=stream0)
buf12 = empty_strided_cuda((4, 2, 64, 64, 64), (524288, 262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
triton_poi_fused_mul_5.run(primals_1, buf11, buf12, 2097152, grid=grid(2097152), stream=stream0)
del buf11
return (buf12, primals_1, primals_2, buf0, buf1, buf2, buf3, buf7, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 64, 64, 64), (524288, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 3, 3, 3), (54, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BasicConv3D(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv3D, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
        self.bn = nn.InstanceNorm3d(out_planes, eps=1e-05, momentum=0.01,
            affine=True) if bn else None
self.relu = nn.LeakyReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool3D(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
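# Hedged shape note (added): ChannelPool3D maps (N, C, D, H, W) to
# (N, 2, D, H, W) by stacking the channel-wise max and mean. A quick check,
# with an assumed toy size:
#   x = torch.rand(1, 2, 4, 4, 4)
#   assert ChannelPool3D()(x).shape == (1, 2, 4, 4, 4)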
class SpatialGate3D(nn.Module):
def __init__(self):
super(SpatialGate3D, self).__init__()
kernel_size = 3
self.compress = ChannelPool3D()
self.spatial = BasicConv3D(2, 1, kernel_size, stride=1, padding=(
kernel_size - 1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = torch.sigmoid_(x_out)
return x * scale
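# Minimal usage sketch (added; assumes CPU is fine for a smoke test and uses a
# smaller volume than get_inputs() to keep it cheap):
def _spatial_gate_example():
    gate = SpatialGate3D()
    x = torch.rand(2, 2, 8, 8, 8)
    out = gate(x)
    # the gate rescales x by a per-voxel sigmoid weight in (0, 1), so shape is
    # kept and magnitudes can only shrink
    assert out.shape == x.shape
    assert (out.abs() <= x.abs() + 1e-06).all()
    return out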
def get_inputs():
return [torch.rand([4, 2, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 262144 % 2
x0 = xindex % 262144
x2 = xindex // 524288
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 524288 * x2), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (262144 + x0 + 524288 * x2), tmp4,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp13 = tl.load(in_ptr0 + (x0 + 524288 * x2), tmp10, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl.load(in_ptr0 + (262144 + x0 + 524288 * x2), tmp10,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = 2.0
tmp17 = tmp15 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp9, tmp19)
tl.store(out_ptr0 + x3, tmp20, None)
@triton.jit
def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.store(out_ptr2 + x0, tmp4, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 262144.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.sigmoid(tmp8)
tl.store(out_ptr0 + x2, tmp9, None)
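# Added note: the kernel above fuses the affine instance-norm epilogue with the
# gate's activation: it computes sigmoid(((x - mean) * rsqrt_var) * weight +
# bias) per spatial position, with mean, rsqrt_var, weight, and bias broadcast
# from one value per instance-norm plane (indexed by x1).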
@triton.jit
def triton_poi_fused_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 262144
x2 = xindex // 524288
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 262144 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 64, 64, 64), (524288, 262144, 4096,
64, 1))
assert_size_stride(primals_2, (1, 2, 3, 3, 3), (54, 27, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 64, 64, 64), (524288, 262144, 4096,
64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(2097152)](primals_1, buf0, 2097152,
XBLOCK=1024, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_repeat_1[grid(4)](primals_3, buf2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_repeat_1[grid(4)](primals_4, buf3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128,
128, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128,
128, 1), torch.float32)
buf6 = empty_strided_cuda((1, 4, 1, 1, 1, 32), (128, 32, 128, 128,
128, 1), torch.float32)
triton_red_fused__native_batch_norm_legit_2[grid(128)](buf1, buf4,
buf5, buf6, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf7 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 1, 1, 1), torch.
float32)
buf8 = empty_strided_cuda((1, 4, 1, 1, 1), (4, 1, 4, 4, 4), torch.
float32)
buf10 = reinterpret_tensor(buf8, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0)
del buf8
triton_per_fused__native_batch_norm_legit_3[grid(4)](buf10, buf4,
buf5, buf6, buf7, 4, 32, XBLOCK=1, num_warps=2, num_stages=1)
del buf4
del buf5
del buf6
buf11 = empty_strided_cuda((4, 1, 64, 64, 64), (262144, 1048576,
4096, 64, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(1048576)](buf1, buf7, buf10, buf2,
buf3, buf11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 2, 64, 64, 64), (524288, 262144,
4096, 64, 1), torch.float32)
triton_poi_fused_mul_5[grid(2097152)](primals_1, buf11, buf12,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del buf11
return buf12, primals_1, primals_2, buf0, buf1, buf2, buf3, buf7, buf10
class BasicConv3D(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv3D, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
        self.bn = nn.InstanceNorm3d(out_planes, eps=1e-05, momentum=0.01,
            affine=True) if bn else None
self.relu = nn.LeakyReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool3D(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
class SpatialGate3DNew(nn.Module):
def __init__(self):
super(SpatialGate3DNew, self).__init__()
kernel_size = 3
self.compress = ChannelPool3D()
self.spatial = BasicConv3D(2, 1, kernel_size, stride=1, padding=(
kernel_size - 1) // 2, relu=False)
def forward(self, input_0):
primals_2 = self.spatial.conv.weight
primals_3 = self.spatial.bn.weight
primals_4 = self.spatial.bn.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Healingl/3DAPRNet | SpatialGate3D | false | 2,382 | [
"BSD-2-Clause"
] | 0 | 7c5e0028ae844df4e1f26327e8b438532ca0745f | https://github.com/Healingl/3DAPRNet/tree/7c5e0028ae844df4e1f26327e8b438532ca0745f | import torch
import torch.nn as nn
class BasicConv3D(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super().__init__()
self.out_channels = out_planes
self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
        self.bn = nn.InstanceNorm3d(out_planes, eps=1e-05, momentum=0.01,
            affine=True) if bn else None
self.relu = nn.LeakyReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool3D(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
class Model(nn.Module):
def __init__(self):
super().__init__()
kernel_size = 3
self.compress = ChannelPool3D()
self.spatial = BasicConv3D(2, 1, kernel_size, stride=1, padding=(
kernel_size - 1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = torch.sigmoid_(x_out)
return x * scale
def get_inputs():
return [torch.rand([4, 2, 64, 64, 64])]
def get_init_inputs():
return []
|
AvgPool2dSame | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4], [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
from typing import List
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
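# Hedged worked example (added): with the shapes used below (x=4, k=4, s=4,
# d=1) the formula gives max((ceil(4/4)-1)*4 + (4-1)*1 + 1 - 4, 0) = 0, i.e.
# no padding is needed; with x=5, k=3, s=2, d=1 it gives
# max(2*2 + 2 + 1 - 5, 0) = 2, which pad_same splits as 1 on each side.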
def pad_same(x, k: 'List[int]', s: 'List[int]', d: 'List[int]'=(1, 1),
value: 'float'=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw,
k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2], value=value)
return x
def to_2tuple(item):
if np.isscalar(item):
return item, item
else:
return item
class AvgPool2dSame(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: 'int', stride=None, padding=0,
ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0),
ceil_mode, count_include_pad)
def forward(self, x):
x = pad_same(x, self.kernel_size, self.stride)
return F.avg_pool2d(x, self.kernel_size, self.stride, self.padding,
self.ceil_mode, self.count_include_pad)
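# Minimal usage sketch (added): when the input size is already divisible by
# the window, SAME padding is a no-op and the layer matches plain average
# pooling. The sizes are assumptions for a smoke test, not fixed by the source.
def _avg_pool_same_example():
    pool = AvgPool2dSame(kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    out = pool(x)
    assert out.shape == (4, 4, 1, 1)
    assert torch.allclose(out, F.avg_pool2d(x, 4))
    return out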
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
from typing import List
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
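# Added note: because the pooling window covers the whole 4x4 spatial plane,
# the kernel simply sums all 16 contiguous values of each (batch, channel)
# plane and multiplies by 0.0625 (= 1/16), collapsing avg_pool2d to one output
# element per plane.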
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k: 'List[int]', s: 'List[int]', d: 'List[int]'=(1, 1),
value: 'float'=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw,
k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2], value=value)
return x
def to_2tuple(item):
if np.isscalar(item):
return item, item
else:
return item
class AvgPool2dSameNew(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: 'int', stride=None, padding=0,
ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super(AvgPool2dSameNew, self).__init__(kernel_size, stride, (0, 0),
ceil_mode, count_include_pad)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hcnaeg/DI-engine | AvgPool2dSame | false | 2,383 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import math
import torch
import numpy as np
from typing import List
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k: 'List[int]', s: 'List[int]', d: 'List[int]'=(1, 1),
value: 'float'=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw,
k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2], value=value)
return x
def to_2tuple(item):
if np.isscalar(item):
return item, item
else:
return item
class Model(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: 'int', stride=None, padding=0,
ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super().__init__(kernel_size, stride, (0, 0),
ceil_mode, count_include_pad)
def forward(self, x):
x = pad_same(x, self.kernel_size, self.stride)
return F.avg_pool2d(x, self.kernel_size, self.stride, self.padding,
self.ceil_mode, self.count_include_pad)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
EnsembleFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jv/cjvcsredzlnp5p23u3wgkkflope6kvqewy3nepikau7sddqcldfj.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class EnsembleFC(nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: 'int'
out_features: 'int'
ensemble_size: 'int'
weight: 'torch.Tensor'
def __init__(self, in_features: 'int', out_features: 'int',
ensemble_size: 'int', weight_decay: 'float'=0.0) ->None:
super(EnsembleFC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ensemble_size = ensemble_size
self.weight = nn.Parameter(torch.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
self.bias = nn.Parameter(torch.Tensor(ensemble_size, 1, out_features))
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
assert input.shape[0] == self.ensemble_size and len(input.shape) == 3
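        # (E, B, in) @ (E, in, out) -> (E, B, out); bias (E, 1, out) broadcasts over B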
return torch.bmm(input, self.weight) + self.bias
def extra_repr(self) ->str:
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'ensemble_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
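# Fused in-place bias add over the bmm output: xnumel = 4*4*4 = 64 elements,
# x0 = out-feature index, x2 = ensemble index, so each element adds
# bias[x2, 0, x0] (broadcast over the batch dimension).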
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')

tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class EnsembleFCNew(nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: 'int'
out_features: 'int'
ensemble_size: 'int'
weight: 'torch.Tensor'
def __init__(self, in_features: 'int', out_features: 'int',
ensemble_size: 'int', weight_decay: 'float'=0.0) ->None:
super(EnsembleFCNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ensemble_size = ensemble_size
self.weight = nn.Parameter(torch.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
self.bias = nn.Parameter(torch.Tensor(ensemble_size, 1, out_features))
def extra_repr(self) ->str:
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Hcnaeg/DI-engine | EnsembleFC | false | 2,384 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: 'int'
out_features: 'int'
ensemble_size: 'int'
weight: 'torch.Tensor'
def __init__(self, in_features: 'int', out_features: 'int',
ensemble_size: 'int', weight_decay: 'float'=0.0) ->None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.ensemble_size = ensemble_size
self.weight = nn.Parameter(torch.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
self.bias = nn.Parameter(torch.Tensor(ensemble_size, 1, out_features))
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
assert input.shape[0] == self.ensemble_size and len(input.shape) == 3
return torch.bmm(input, self.weight) + self.bias
def extra_repr(self) ->str:
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oz/cozqxlcluuaqzreyfue6z5fkzxjeuuwqcorl77txds26n7irqcna.py
# Topologically Sorted Source Nodes: [h, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# h => convolution
# leaky_relu => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class Encoder(nn.Module):
def __init__(self, input_size, filters):
super().__init__()
self.input_size = input_size
self.conv = Conv(input_size[0], filters, 3, bn=False)
self.activation = nn.LeakyReLU(0.1)
def forward(self, x):
return self.activation(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': [4, 4], 'filters': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
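# Fuses the conv bias add with LeakyReLU(0.1): out_ptr0 receives the boolean
# (x > 0) mask kept for the backward pass, out_ptr1 the activated values.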
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, primals_1, primals_3, buf1
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class EncoderNew(nn.Module):
def __init__(self, input_size, filters):
super().__init__()
self.input_size = input_size
self.conv = Conv(input_size[0], filters, 3, bn=False)
self.activation = nn.LeakyReLU(0.1)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Hcnaeg/DI-engine | Encoder | false | 2,385 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class Model(nn.Module):
def __init__(self, input_size, filters):
super().__init__()
self.input_size = input_size
self.conv = Conv(input_size[0], filters, 3, bn=False)
self.activation = nn.LeakyReLU(0.1)
def forward(self, x):
return self.activation(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[4, 4], 4]
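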
|
Head | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oz/cozqxlcluuaqzreyfue6z5fkzxjeuuwqcorl77txds26n7irqcna.py
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# h => convolution
# h_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 64), (64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (4, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf3)
return (buf3, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (4, 64), (64, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class Head(nn.Module):
def __init__(self, input_size, out_filters, outputs):
super().__init__()
self.board_size = input_size[1] * input_size[2]
self.out_filters = out_filters
self.conv = Conv(input_size[0], out_filters, 1, bn=False)
self.activation = nn.LeakyReLU(0.1)
self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False)
def forward(self, x):
h = self.activation(self.conv(x))
h = self.fc(h.view(-1, self.board_size * self.out_filters))
return h
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': [4, 4, 4], 'out_filters': 4, 'outputs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 64), (64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
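        # Bias-free Linear on the flattened activations: buf2 (4, 4, 4, 4) is
        # reinterpreted as (4, 64) with no copy, then multiplied by fc.weight.T.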
extern_kernels.mm(reinterpret_tensor(buf2, (4, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf3)
return buf3, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (4,
64), (64, 1), 0), primals_4
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class HeadNew(nn.Module):
def __init__(self, input_size, out_filters, outputs):
super().__init__()
self.board_size = input_size[1] * input_size[2]
self.out_filters = out_filters
self.conv = Conv(input_size[0], out_filters, 1, bn=False)
self.activation = nn.LeakyReLU(0.1)
self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_4 = self.fc.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Hcnaeg/DI-engine | Head | false | 2,386 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
super().__init__()
if bn:
bias = False
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
padding=kernel_size // 2, bias=bias)
self.bn = nn.BatchNorm2d(filters1) if bn else None
def forward(self, x):
h = self.conv(x)
if self.bn is not None:
h = self.bn(h)
return h
class Model(nn.Module):
def __init__(self, input_size, out_filters, outputs):
super().__init__()
self.board_size = input_size[1] * input_size[2]
self.out_filters = out_filters
self.conv = Conv(input_size[0], out_filters, 1, bn=False)
self.activation = nn.LeakyReLU(0.1)
self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False)
def forward(self, x):
h = self.activation(self.conv(x))
h = self.fc(h.view(-1, self.board_size * self.out_filters))
return h
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[4, 4, 4], 4, 4]
|
PPMConcat | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cr/ccrgimd5zqak747hzrbdpprnae5dbx4vetggrn46afu3ejbaeqzr.py
# Topologically Sorted Source Nodes: [ppm_out, concat_outs], Original ATen: [aten.mean, aten.cat]
# Source node to ATen node mapping:
# concat_outs => cat
# ppm_out => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {})
triton_per_fused_cat_mean_0 = async_compile.triton('triton_per_fused_cat_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (110*x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/b7/cb7webixgun5kq7klyyw3pye6ybqszrjc476b25fx2hkpqtlyz4c.py
# Topologically Sorted Source Nodes: [ppm_out_1, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
# Source node to ATen node mapping:
# concat_outs => cat
# ppm_out_1 => _adaptive_avg_pool2d
# Graph fragment:
# %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [3, 3]), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_cat_1 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_1(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3) % 3
x0 = xindex % 3
x2 = (xindex // 9)
x5 = xindex
x3 = xindex % 9
tmp0 = ((4*x1) // 3)
tmp1 = 2 + ((4*x1) // 3)
tmp2 = tmp0 < tmp1
tmp3 = ((4*x0) // 3)
tmp4 = 2 + ((4*x0) // 3)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp6 & xmask, other=0.0)
tmp8 = 1 + ((4*x0) // 3)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp10 & xmask, other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + ((4*x1) // 3)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp15 & xmask, other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp18 & xmask, other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wp/cwp4azliwtvpgqegpgjubymmobqvhael5uz7meise5e3joe5bqu2.py
# Topologically Sorted Source Nodes: [ppm_out_2, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
# Source node to ATen node mapping:
# concat_outs => cat
# ppm_out_2 => _adaptive_avg_pool2d_1
# Graph fragment:
# %_adaptive_avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [6, 6]), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_cat_2 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_2(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x5 = xindex
x3 = xindex % 36
tmp0 = ((2*x1) // 3)
tmp1 = ((9 + (4*x1)) // 6)
tmp2 = tmp0 < tmp1
tmp3 = ((2*x0) // 3)
tmp4 = ((9 + (4*x0)) // 6)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + ((2*x0) // 3)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + ((2*x1) // 3)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nd/cnd5jniex5euxalox74lnevxasjts2znoosbfhhqne7m2q47peko.py
# Topologically Sorted Source Nodes: [ppm_out_3, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
# Source node to ATen node mapping:
# concat_outs => cat
# ppm_out_3 => _adaptive_avg_pool2d_2
# Graph fragment:
# %_adaptive_avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [8, 8]), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_cat_3 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_3(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x5 = xindex
x3 = xindex % 64
tmp0 = (x1 // 2)
tmp1 = ((11 + (4*x1)) // 8)
tmp2 = tmp0 < tmp1
tmp3 = (x0 // 2)
tmp4 = ((11 + (4*x0)) // 8)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + (x0 // 2)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + (x1 // 2)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf8 = empty_strided_cuda((4, 4, 110), (440, 110, 1), torch.float32)
buf4 = reinterpret_tensor(buf8, (4, 4, 1), (440, 110, 1), 0) # alias
# Topologically Sorted Source Nodes: [ppm_out, concat_outs], Original ATen: [aten.mean, aten.cat]
stream0 = get_raw_stream(0)
triton_per_fused_cat_mean_0.run(arg0_1, buf4, 16, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf8, (4, 4, 9), (440, 110, 1), 1) # alias
# Topologically Sorted Source Nodes: [ppm_out_1, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
triton_poi_fused__adaptive_avg_pool2d_cat_1.run(arg0_1, buf5, 144, grid=grid(144), stream=stream0)
buf6 = reinterpret_tensor(buf8, (4, 4, 36), (440, 110, 1), 10) # alias
# Topologically Sorted Source Nodes: [ppm_out_2, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
triton_poi_fused__adaptive_avg_pool2d_cat_2.run(arg0_1, buf6, 576, grid=grid(576), stream=stream0)
buf7 = reinterpret_tensor(buf8, (4, 4, 64), (440, 110, 1), 46) # alias
# Topologically Sorted Source Nodes: [ppm_out_3, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat]
triton_poi_fused__adaptive_avg_pool2d_cat_3.run(arg0_1, buf7, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch._C
import torch.serialization
class PPMConcat(nn.ModuleList):
"""Pyramid Pooling Module that only concat the features of each layer.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
"""
def __init__(self, pool_scales=(1, 3, 6, 8)):
super(PPMConcat, self).__init__([nn.AdaptiveAvgPool2d(pool_scale) for
pool_scale in pool_scales])
def forward(self, feats):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(feats)
ppm_outs.append(ppm_out.view(*feats.shape[:2], -1))
concat_outs = torch.cat(ppm_outs, dim=2)
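        # with the default scales: (N, C, 1*1 + 3*3 + 6*6 + 8*8) = (N, C, 110)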
return concat_outs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
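# Global average pool (scale 1): one program per (batch, channel) row reduces
# all 16 spatial values and writes the mean into column 0 of the fused output
# buffer (row stride 110).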
@triton.jit
def triton_per_fused_cat_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + 110 * x0, tmp6, xmask)
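# Adaptive average pooling: each output cell sums up to a 2x2 input window;
# the tl.where terms zero the taps that fall outside the window and tmp29
# counts the live taps, matching aten._adaptive_avg_pool2d semantics.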
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_1(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x3 = xindex % 9
tmp0 = 4 * x1 // 3
tmp1 = 2 + 4 * x1 // 3
tmp2 = tmp0 < tmp1
tmp3 = 4 * x0 // 3
tmp4 = 2 + 4 * x0 // 3
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3),
tmp6 & xmask, other=0.0)
tmp8 = 1 + 4 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp10 & xmask, other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 4 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp15 & xmask, other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp18 & xmask, other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_2(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x3 = xindex % 36
tmp0 = 2 * x1 // 3
tmp1 = (9 + 4 * x1) // 6
tmp2 = tmp0 < tmp1
tmp3 = 2 * x0 // 3
tmp4 = (9 + 4 * x0) // 6
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3),
tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + 2 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 2 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_3(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x3 = xindex % 64
tmp0 = x1 // 2
tmp1 = (11 + 4 * x1) // 8
tmp2 = tmp0 < tmp1
tmp3 = x0 // 2
tmp4 = (11 + 4 * x0) // 8
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + x0 // 2
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + x1 // 2
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf8 = empty_strided_cuda((4, 4, 110), (440, 110, 1), torch.float32)
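        # buf4/buf5/buf6/buf7 below alias slices of buf8 at column offsets
        # 0, 1, 10 and 46 (i.e. 1, 1+9, 1+9+36), so each pooling kernel writes
        # straight into the concatenated output and no separate cat copy runs.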
buf4 = reinterpret_tensor(buf8, (4, 4, 1), (440, 110, 1), 0)
get_raw_stream(0)
triton_per_fused_cat_mean_0[grid(16)](arg0_1, buf4, 16, 16, XBLOCK=
1, num_warps=2, num_stages=1)
buf5 = reinterpret_tensor(buf8, (4, 4, 9), (440, 110, 1), 1)
triton_poi_fused__adaptive_avg_pool2d_cat_1[grid(144)](arg0_1, buf5,
144, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf8, (4, 4, 36), (440, 110, 1), 10)
triton_poi_fused__adaptive_avg_pool2d_cat_2[grid(576)](arg0_1, buf6,
576, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf8, (4, 4, 64), (440, 110, 1), 46)
triton_poi_fused__adaptive_avg_pool2d_cat_3[grid(1024)](arg0_1,
buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf8,
class PPMConcatNew(nn.ModuleList):
"""Pyramid Pooling Module that only concat the features of each layer.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
"""
def __init__(self, pool_scales=(1, 3, 6, 8)):
super(PPMConcatNew, self).__init__([nn.AdaptiveAvgPool2d(pool_scale
) for pool_scale in pool_scales])
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ImportPaddle/APCNet | PPMConcat | false | 2,387 | [
"MIT"
] | 0 | 68ade1f83827b4cdd60ee4b6ac25454397100316 | https://github.com/ImportPaddle/APCNet/tree/68ade1f83827b4cdd60ee4b6ac25454397100316 | import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.ModuleList):
"""Pyramid Pooling Module that only concat the features of each layer.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
"""
def __init__(self, pool_scales=(1, 3, 6, 8)):
super().__init__([nn.AdaptiveAvgPool2d(pool_scale) for
pool_scale in pool_scales])
def forward(self, feats):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(feats)
ppm_outs.append(ppm_out.view(*feats.shape[:2], -1))
concat_outs = torch.cat(ppm_outs, dim=2)
return concat_outs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiHeadedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# mask => eq
# Graph fragment:
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2m/c2mqa6egkdg5wothwstp4t4dtvjgjekw5mx5ldieie6krs4bxolb.py
# Topologically Sorted Source Nodes: [scores, scores_1, softmax], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# scores => div
# scores_1 => full_default, where
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float("-inf")
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x3), tmp20, xmask)
tl.store(out_ptr1 + (x3), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qh/cqhmiexfxtu22w5zckygdewcq6zc6ji4wfscqj2genrriihibfdb.py
# Topologically Sorted Source Nodes: [scores, scores_1, softmax, attn], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attn => full_default_1, where_1
# scores => div
# scores_1 => full_default, where
# softmax => amax, div_1, exp, sub
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x5), xmask)
tmp6 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x6), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float("-inf")
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp0, tmp11, tmp10)
tl.store(out_ptr0 + (x5), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
triton_poi_fused_eq_1.run(primals_10, buf6, 64, grid=grid(64), stream=stream0)
del primals_10
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [scores, scores_1, softmax], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, scores_1, softmax, attn], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf6, buf5, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_12
return (reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
1.When applying cross attention between decoder and encoder,
the batch padding mask for input is in (#batch, 1, T) shape.
2.When applying self attention of encoder,
the mask is in (#batch, T, T) shape.
3.When applying self attention of decoder,
the mask is in (#batch, L, L) shape.
            4.If different positions in the decoder see different blocks
            of the encoder, such as Mocha, the passed-in mask could be
            in (#batch, L, T) shape. But there is no such case in current
            Wenet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
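def _mask_semantics_example():
    """Illustrative sketch (added commentary, not part of the original wenet
    module): positions where the incoming mask is 0 are excluded from the
    softmax and their attention weights are zeroed afterwards, exactly as
    done in forward_attention above."""
    scores = torch.zeros(1, 1, 1, 3)
    mask = torch.tensor([[[1.0, 1.0, 0.0]]]).unsqueeze(1).eq(0)
    attn = torch.softmax(scores.masked_fill(mask, -float('inf')), dim=-1)
    attn = attn.masked_fill(mask, 0.0)
    # attn == [0.5, 0.5, 0.0]: probability mass lands only on unmasked slots
    return attn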
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Optional
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
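# Note (added commentary): the kernel above is the first half of a two-pass
# masked softmax -- it writes the per-row maximum (out_ptr0) and the sum of
# exponentials (out_ptr1); the kernel below consumes both to normalize each
# score and re-zero the masked positions.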
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + x5, xmask)
tmp6 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp0, tmp11, tmp10)
tl.store(out_ptr0 + x5, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf6, buf5,
buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_12
return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
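# Note (added commentary, not emitted by Inductor): only the first returned
# tensor is the attention output; the remaining tensors are intermediates
# (2-D views of the inputs, the raw scores, the boolean mask, and the bmm/mm
# operands) that autograd keeps for the backward pass.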
class MultiHeadedAttentionNew(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_11 = self.linear_out.weight
primals_12 = self.linear_out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| InfluencerNGZK/wenet | MultiHeadedAttention | false | 2,388 | [
"Apache-2.0"
] | 0 | 9a3c7f70a78ce675f5e013b1f67a06d1d23fba3e | https://github.com/InfluencerNGZK/wenet/tree/9a3c7f70a78ce675f5e013b1f67a06d1d23fba3e | import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class Model(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
1.When applying cross attention between decoder and encoder,
the batch padding mask for input is in (#batch, 1, T) shape.
2.When applying self attention of encoder,
the mask is in (#batch, T, T) shape.
3.When applying self attention of decoder,
# ... truncated (>4000 chars) for memory efficiency |
SENet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SENet(nn.Module):
"""support estimation network"""
def __init__(self, input_size: 'int', hidden_size: 'int', output_dims:
'int') ->None:
super(SENet, self).__init__()
self.l_1 = nn.Linear(input_size, hidden_size)
self.l_2 = nn.Linear(hidden_size, output_dims)
self.act = nn.Tanh()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = self.l_1(x)
out = self.act(out)
out = self.l_2(out)
out = self.act(out)
return out
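def _senet_equivalence_check():
    """Illustrative sketch (added commentary, not part of the original file):
    the module above is the functional composition tanh(l_2(tanh(l_1(x))))."""
    net = SENet(input_size=4, hidden_size=4, output_dims=4)
    x = torch.rand(4, 4, 4, 4)
    ref = torch.tanh(net.l_2(torch.tanh(net.l_1(x))))
    assert torch.allclose(net(x), ref)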
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_dims': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
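# Note (added commentary): triton_poi_fused_tanh_0 fuses each Linear's bias
# add with tanh and runs in place, so the matmul outputs buf0 and buf2 are
# simply reinterpreted to 4-D (buf1, buf3) and overwritten rather than copied.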
class SENetNew(nn.Module):
"""support estimation network"""
def __init__(self, input_size: 'int', hidden_size: 'int', output_dims:
'int') ->None:
super(SENetNew, self).__init__()
self.l_1 = nn.Linear(input_size, hidden_size)
self.l_2 = nn.Linear(hidden_size, output_dims)
self.act = nn.Tanh()
def forward(self, input_0):
primals_1 = self.l_1.weight
primals_2 = self.l_1.bias
primals_4 = self.l_2.weight
primals_5 = self.l_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Hcnaeg/DI-engine | SENet | false | 2,389 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""support estimation network"""
def __init__(self, input_size: 'int', hidden_size: 'int', output_dims:
'int') ->None:
super().__init__()
self.l_1 = nn.Linear(input_size, hidden_size)
self.l_2 = nn.Linear(hidden_size, output_dims)
self.act = nn.Tanh()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = self.l_1(x)
out = self.act(out)
out = self.l_2(out)
out = self.act(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ClippedLinearQuantization | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zp/czptdn7jot5nhvvxjgbeji75wpdaf2gpuyywmstgg3fjdtzgdimv.py
# Topologically Sorted Source Nodes: [input_1, mul, sub, output, add, output_1], Original ATen: [aten.clamp, aten.mul, aten.sub, aten.round, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# input_1 => clamp_max, clamp_min
# mul => mul
# output => round_1
# output_1 => div
# sub => sub
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 4), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 3.75), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.0), kwargs = {})
# %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%sub,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%round_1, 0.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 3.75), kwargs = {})
triton_poi_fused_add_clamp_div_mul_round_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_round_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_round_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 4.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 3.75
tmp6 = tmp4 * tmp5
tmp7 = tmp6 - tmp1
tmp8 = libdevice.nearbyint(tmp7)
tmp9 = tmp8 + tmp1
tmp10 = 0.26666666666666666
tmp11 = tmp9 * tmp10
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
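# Note (added commentary): the constants above come from the module's
# quantization parameters for num_bits=4, clip_val=4: scale = (2**4 - 1) / 4
# = 3.75 and zero_point = 0.0, so the fused kernel clamps to [0, 4],
# multiplies by 3.75, rounds, and rescales by 1/3.75 = 0.26666666666666666.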
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, mul, sub, output, add, output_1], Original ATen: [aten.clamp, aten.mul, aten.sub, aten.round, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_mul_round_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.optim.lr_scheduler import *
import torch.optim
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.testing
def linear_dequantize(input, scale, zero_point, inplace=False):
if inplace:
input.add_(zero_point).div_(scale)
return input
return (input + zero_point) / scale
def linear_quantize(input, scale, zero_point, inplace=False):
if inplace:
input.mul_(scale).sub_(zero_point).round_()
return input
return torch.round(scale * input - zero_point)
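def _quantize_roundtrip_example():
    """Illustrative sketch (added commentary, not part of the original file):
    quantize-then-dequantize snaps values onto a uniform grid of step
    1/scale, which is the fake-quantization effect the STE below trains
    through."""
    x = torch.tensor([0.1, 0.5, 1.0])
    q = linear_quantize(x, scale=3.75, zero_point=0.0)
    xr = linear_dequantize(q, scale=3.75, zero_point=0.0)
    # xr == [0.0000, 0.5333, 1.0667]: each entry is round(3.75 * x) / 3.75
    return xr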
def _prep_saturation_val_tensor(sat_val):
is_scalar = not isinstance(sat_val, torch.Tensor)
out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach()
    if not out.is_floating_point():
        out = out.to(torch.float32)
if out.dim() == 0:
out = out.unsqueeze(0)
return is_scalar, out
def asymmetric_linear_quantization_params(num_bits, saturation_min,
saturation_max, integral_zero_point=True, signed=False):
scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min)
scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max)
is_scalar = scalar_min and scalar_max
    if scalar_max and not scalar_min:
        sat_max = sat_max.to(sat_min.device)
    elif scalar_min and not scalar_max:
        sat_min = sat_min.to(sat_max.device)
if any(sat_min > sat_max):
raise ValueError('saturation_min must be smaller than saturation_max')
n = 2 ** num_bits - 1
sat_min = torch.min(sat_min, torch.zeros_like(sat_min))
sat_max = torch.max(sat_max, torch.zeros_like(sat_max))
diff = sat_max - sat_min
diff[diff == 0] = n
scale = n / diff
zero_point = scale * sat_min
if integral_zero_point:
zero_point = zero_point.round()
if signed:
zero_point += 2 ** (num_bits - 1)
if is_scalar:
return scale.item(), zero_point.item()
return scale, zero_point
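# Worked example matching get_init_inputs() (num_bits=4, clip_val=4):
# n = 2**4 - 1 = 15, diff = max(4, 0) - min(0, 0) = 4, so scale = 15/4 = 3.75
# and zero_point = round(3.75 * 0) = 0.0.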
def clamp(input, min, max, inplace=False):
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
class LinearQuantizeSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scale, zero_point, dequantize, inplace):
if inplace:
ctx.mark_dirty(input)
output = linear_quantize(input, scale, zero_point, inplace)
if dequantize:
output = linear_dequantize(output, scale, zero_point, inplace)
return output
@staticmethod
def backward(ctx, grad_output):
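        # Straight-through estimator: rounding is treated as the identity in
        # the backward pass, so the gradient flows to `input` unchanged and
        # the quantization parameters receive no gradient.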
return grad_output, None, None, None, None
class ClippedLinearQuantization(nn.Module):
def __init__(self, num_bits, clip_val, dequantize=True, inplace=False):
super(ClippedLinearQuantization, self).__init__()
self.num_bits = num_bits
self.clip_val = clip_val
self.scale, self.zero_point = asymmetric_linear_quantization_params(
num_bits, 0, clip_val, signed=False)
self.dequantize = dequantize
self.inplace = inplace
def forward(self, input):
input = clamp(input, 0, self.clip_val, self.inplace)
input = LinearQuantizeSTE.apply(input, self.scale, self.zero_point,
self.dequantize, self.inplace)
return input
def __repr__(self):
inplace_str = ', inplace' if self.inplace else ''
return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__.
__name__, self.num_bits, self.clip_val, inplace_str)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_bits': 4, 'clip_val': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.optim.lr_scheduler import *
import torch.optim
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.testing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_mul_round_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 4.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 3.75
tmp6 = tmp4 * tmp5
tmp7 = tmp6 - tmp1
tmp8 = libdevice.nearbyint(tmp7)
tmp9 = tmp8 + tmp1
tmp10 = 0.26666666666666666
tmp11 = tmp9 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
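# Sanity check: this kernel reproduces ClippedLinearQuantization.forward for
# num_bits=4, clip_val=4: clamp to [0, 4], scale by 3.75, round, then multiply
# by 1/3.75, i.e. a fused quantize-dequantize with zero_point = 0.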
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_div_mul_round_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def linear_dequantize(input, scale, zero_point, inplace=False):
if inplace:
input.add_(zero_point).div_(scale)
return input
return (input + zero_point) / scale
def linear_quantize(input, scale, zero_point, inplace=False):
if inplace:
input.mul_(scale).sub_(zero_point).round_()
return input
return torch.round(scale * input - zero_point)
def _prep_saturation_val_tensor(sat_val):
is_scalar = not isinstance(sat_val, torch.Tensor)
out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach()
    if not out.is_floating_point():
        out = out.to(torch.float32)
if out.dim() == 0:
out = out.unsqueeze(0)
return is_scalar, out
def asymmetric_linear_quantization_params(num_bits, saturation_min,
saturation_max, integral_zero_point=True, signed=False):
scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min)
scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max)
is_scalar = scalar_min and scalar_max
    if scalar_max and not scalar_min:
        sat_max = sat_max.to(sat_min.device)
    elif scalar_min and not scalar_max:
        sat_min = sat_min.to(sat_max.device)
if any(sat_min > sat_max):
raise ValueError('saturation_min must be smaller than saturation_max')
n = 2 ** num_bits - 1
sat_min = torch.min(sat_min, torch.zeros_like(sat_min))
sat_max = torch.max(sat_max, torch.zeros_like(sat_max))
diff = sat_max - sat_min
diff[diff == 0] = n
scale = n / diff
zero_point = scale * sat_min
if integral_zero_point:
zero_point = zero_point.round()
if signed:
zero_point += 2 ** (num_bits - 1)
if is_scalar:
return scale.item(), zero_point.item()
return scale, zero_point
def clamp(input, min, max, inplace=False):
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
class LinearQuantizeSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scale, zero_point, dequantize, inplace):
if inplace:
ctx.mark_dirty(input)
output = linear_quantize(input, scale, zero_point, inplace)
if dequantize:
output = linear_dequantize(output, scale, zero_point, inplace)
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None, None, None
class ClippedLinearQuantizationNew(nn.Module):
def __init__(self, num_bits, clip_val, dequantize=True, inplace=False):
super(ClippedLinearQuantizationNew, self).__init__()
self.num_bits = num_bits
self.clip_val = clip_val
self.scale, self.zero_point = asymmetric_linear_quantization_params(
num_bits, 0, clip_val, signed=False)
self.dequantize = dequantize
self.inplace = inplace
def __repr__(self):
inplace_str = ', inplace' if self.inplace else ''
return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__.
__name__, self.num_bits, self.clip_val, inplace_str)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HatsuneMiku4/distiller | ClippedLinearQuantization | false | 2,390 | [
"Apache-2.0"
] | 0 | 8fbacb01ebcb7d70c5d3ecb6a88093e6c4d42137 | https://github.com/HatsuneMiku4/distiller/tree/8fbacb01ebcb7d70c5d3ecb6a88093e6c4d42137 | import torch
from torch.optim.lr_scheduler import *
import torch.optim
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.testing
def linear_dequantize(input, scale, zero_point, inplace=False):
if inplace:
input.add_(zero_point).div_(scale)
return input
return (input + zero_point) / scale
def linear_quantize(input, scale, zero_point, inplace=False):
if inplace:
input.mul_(scale).sub_(zero_point).round_()
return input
return torch.round(scale * input - zero_point)
def _prep_saturation_val_tensor(sat_val):
is_scalar = not isinstance(sat_val, torch.Tensor)
out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach()
    if not out.is_floating_point():
        out = out.to(torch.float32)
if out.dim() == 0:
out = out.unsqueeze(0)
return is_scalar, out
def asymmetric_linear_quantization_params(num_bits, saturation_min,
saturation_max, integral_zero_point=True, signed=False):
scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min)
scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max)
is_scalar = scalar_min and scalar_max
    if scalar_max and not scalar_min:
        sat_max = sat_max.to(sat_min.device)
    elif scalar_min and not scalar_max:
        sat_min = sat_min.to(sat_max.device)
if any(sat_min > sat_max):
raise ValueError('saturation_min must be smaller than saturation_max')
n = 2 ** num_bits - 1
sat_min = torch.min(sat_min, torch.zeros_like(sat_min))
sat_max = torch.max(sat_max, torch.zeros_like(sat_max))
diff = sat_max - sat_min
diff[diff == 0] = n
scale = n / diff
zero_point = scale * sat_min
if integral_zero_point:
zero_point = zero_point.round()
if signed:
zero_point += 2 ** (num_bits - 1)
if is_scalar:
return scale.item(), zero_point.item()
return scale, zero_point
def clamp(input, min, max, inplace=False):
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
class LinearQuantizeSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scale, zero_point, dequantize, inplace):
if inplace:
ctx.mark_dirty(input)
output = linear_quantize(input, scale, zero_point, inplace)
if dequantize:
output = linear_dequantize(output, scale, zero_point, inplace)
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None, None, None
class Model(nn.Module):
def __init__(self, num_bits, clip_val, dequantize=True, inplace=False):
super().__init__()
self.num_bits = num_bits
self.clip_val = clip_val
self.scale, self.zero_point = asymmetric_linear_quantization_params(
num_bits, 0, clip_val, signed=False)
self.dequantize = dequantize
self.inplace = inplace
def forward(self, input):
input = clamp(input, 0, self.clip_val, self.inplace)
input = LinearQuantizeSTE.apply(input, self.scale, self.zero_point,
self.dequantize, self.inplace)
return input
def __repr__(self):
inplace_str = ', inplace' if self.inplace else ''
return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__.
__name__, self.num_bits, self.clip_val, inplace_str)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GEGLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/g6/cg6py454hsmh5ek5ufygwdjqk6x5t4pxnpc52hre5drrru6gkirg.py
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# mul => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul_2), kwargs = {})
triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_mul_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
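# Indexing note: x0 + 4 * x1 reads the first half of the size-4 last dim and
# 2 + x0 + 4 * x1 the second half, i.e. x.chunk(2, dim=-1); the erf form with
# 0.7071... = 1/sqrt(2) is exact GELU, so the kernel computes x * gelu(gates).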
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GEGLUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JaireYu/perceiver-pytorch | GEGLU | false | 2,391 | [
"MIT"
] | 0 | 23edd66a057bb0a6fc15126461b4409a522ca09e | https://github.com/JaireYu/perceiver-pytorch/tree/23edd66a057bb0a6fc15126461b4409a522ca09e | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xm/cxmdp5t3wxotyujeiyf3zjeco74mukztuucnsawsu5mwwpr2vkgy.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 2.0), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1)
del arg1_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class ScaledDotProductAttention(nn.Module):
"""
Overview:
        Implementation of dot product attention with scaling.
"""
def __init__(self, d_k: 'int', dropout: 'float'=0.0) ->None:
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
self.dropout = nn.Dropout(dropout)
def forward(self, q: 'torch.Tensor', k: 'torch.Tensor', v:
'torch.Tensor', mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
attn = torch.matmul(q / self.d_k ** 0.5, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(~mask, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_k': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
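# The 0.5 above is 1 / sqrt(d_k) with d_k = 4, i.e. the q / d_k ** 0.5 scaling
# from ScaledDotProductAttention.forward folded into a single multiply.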
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
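# Together these two kernels form a numerically stable softmax over the last
# dim of size 4: kernel 1 computes exp(x - rowwise max), kernel 2 normalizes
# by the rowwise sum.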
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1
)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class ScaledDotProductAttentionNew(nn.Module):
"""
Overview:
        Implementation of dot product attention with scaling.
"""
def __init__(self, d_k: 'int', dropout: 'float'=0.0) ->None:
super(ScaledDotProductAttentionNew, self).__init__()
self.d_k = d_k
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Hcnaeg/DI-engine | ScaledDotProductAttention | false | 2,392 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
"""
Overview:
Implementation of dot product attentionn with scaling.
"""
def __init__(self, d_k: 'int', dropout: 'float'=0.0) ->None:
super().__init__()
self.d_k = d_k
self.dropout = nn.Dropout(dropout)
def forward(self, q: 'torch.Tensor', k: 'torch.Tensor', v:
'torch.Tensor', mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
attn = torch.matmul(q / self.d_k ** 0.5, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(~mask, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RewardModelNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out_3 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class RewardModelNetwork(nn.Module):
def __init__(self, input_size: 'int', hidden_size: 'int', output_size:
'int') ->None:
super(RewardModelNetwork, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, output_size)
self.a1 = nn.Tanh()
self.a2 = nn.Sigmoid()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = x
out = self.l1(out)
out = self.a1(out)
out = self.l2(out)
out = self.a2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
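# Both epilogue kernels fuse the linear layer's bias add (x0 = xindex % 4
# indexes the per-feature bias) with its activation; the matmuls themselves
# are dispatched to extern_kernels.mm in call() below.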
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class RewardModelNetworkNew(nn.Module):
def __init__(self, input_size: 'int', hidden_size: 'int', output_size:
'int') ->None:
super(RewardModelNetworkNew, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, output_size)
self.a1 = nn.Tanh()
self.a2 = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.l1.weight
primals_3 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Hcnaeg/DI-engine | RewardModelNetwork | false | 2,393 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, input_size: 'int', hidden_size: 'int', output_size:
'int') ->None:
super().__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, output_size)
self.a1 = nn.Tanh()
self.a2 = nn.Sigmoid()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = x
out = self.l1(out)
out = self.a1(out)
out = self.l2(out)
out = self.a2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
OutputTransition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nc/cncyyp5w7ugisohxqkosctv5wc5f7u7cfra24vd6r5vixoiwmbl6.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class OutputTransition(nn.Module):
def __init__(self, inChans, n_labels):
super(OutputTransition, self).__init__()
self.final_conv = nn.Conv3d(inChans, n_labels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.sigmoid(self.final_conv(x))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inChans': 4, 'n_labels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
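# The convolution in call() below runs with bias=None; this kernel then adds
# the per-channel bias (x1 = xindex // 64 is the channel index) and applies
# the sigmoid in one pass. reinterpret_tensor views the (4, 4, 4, 4) input as
# a batch-1 (1, 4, 4, 4, 4) volume so Conv3d can consume it.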
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4),
(256, 64, 16, 4, 1), 0), buf1
class OutputTransitionNew(nn.Module):
def __init__(self, inChans, n_labels):
super(OutputTransitionNew, self).__init__()
self.final_conv = nn.Conv3d(inChans, n_labels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.final_conv.weight
primals_2 = self.final_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JXQI/ModelsGenesis | OutputTransition | false | 2,394 | [
"MIT"
] | 0 | f961288313a78f03bd3045ac27722f791f365bd8 | https://github.com/JXQI/ModelsGenesis/tree/f961288313a78f03bd3045ac27722f791f365bd8 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, inChans, n_labels):
super().__init__()
self.final_conv = nn.Conv3d(inChans, n_labels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.sigmoid(self.final_conv(x))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ZeroPad1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zg/czgid72b6fkrabkbzh6dovarzeothmiqpqb7azoiz6xeuyfhgius.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [4, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = (-4) + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 768, grid=grid(768), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad_left': 4, 'pad_right': 4}]
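# --- Illustrative usage sketch (not part of the original record) ---
# F.pad with a 2-tuple pads only the last dimension, so ZeroPad1d(4, 4)
# grows the (4, 4, 4, 4) sample to (4, 4, 4, 12), with the original values
# in the middle slice and zeros on both flanks.
def _demo_zero_pad1d():
    pad = ZeroPad1d(4, 4)
    x, = get_inputs()
    y = pad(x)
    assert y.shape == (4, 4, 4, 12)
    assert torch.equal(y[..., 4:8], x)
    assert not y[..., :4].any() and not y[..., 8:].any()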
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = -4 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(768)](arg0_1, buf0, 768,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ZeroPad1dNew(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
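# --- Illustrative parity sketch (not part of the original record) ---
# The compiled wrapper hard-codes a (4, 4, 4, 4) CUDA input (see the
# assert_size_stride guard in call()), so this check only runs when a GPU
# is present; it compares the Triton kernel against eager F.pad.
def _check_zero_pad1d_parity():
    if not torch.cuda.is_available():
        return
    x = torch.rand(4, 4, 4, 4, device='cuda')
    compiled = ZeroPad1dNew(4, 4)(x)
    eager = torch.nn.functional.pad(x, (4, 4))
    assert torch.equal(compiled, eager)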
| Fei00Wu/espresso | ZeroPad1d | false | 2,395 | [
"MIT"
] | 0 | 4e8e6e2f9151a87448845c5142611c103dd4580c | https://github.com/Fei00Wu/espresso/tree/4e8e6e2f9151a87448845c5142611c103dd4580c | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
class Model(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
VDNNet | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7o/c7otc5ij6whexgxcr56vlxp2l7hzg3oc4onljp557uc6wncu5gvg.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class VDNNet(nn.Module):
def __init__(self):
super(VDNNet, self).__init__()
@staticmethod
def forward(q_values):
return torch.sum(q_values, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
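# --- Illustrative usage sketch (not part of the original record) ---
# VDN-style value decomposition: the joint Q-value is the plain sum of the
# per-agent Q-values, so forward() is just torch.sum over dim 1 (read
# here, by convention, as the agent dimension).
def _demo_vdn_sum():
    q, = get_inputs()
    q_tot = VDNNet()(q)
    assert q_tot.shape == (4, 4, 4)
    assert torch.allclose(q_tot, q[:, 0] + q[:, 1] + q[:, 2] + q[:, 3])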
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class VDNNetNew(nn.Module):
def __init__(self):
super(VDNNetNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
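# --- Illustrative parity sketch (not part of the original record) ---
# The kernel unrolls the dim-1 sum over the four 16-element slices by
# hand; on a GPU it should agree with eager torch.sum(q, dim=1).
def _check_vdn_parity():
    if not torch.cuda.is_available():
        return
    q = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(VDNNetNew()(q), q.sum(dim=1))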
| JJBong/marl | VDNNet | false | 2,396 | [
"MIT"
] | 0 | 836ea6b478787a728506b6de3c551ce6b10f9ba4 | https://github.com/JJBong/marl/tree/836ea6b478787a728506b6de3c551ce6b10f9ba4 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
@staticmethod
def forward(q_values):
return torch.sum(q_values, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
NormedLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize_1 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1, out], Original ATen: [aten.div, aten.mm]
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
return (buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
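# --- Illustrative usage sketch (not part of the original record) ---
# Both the input rows and the weight columns are L2-normalized before the
# matmul, so every output entry is a cosine similarity and is bounded by
# [-1, 1] -- the usual reading of a "normed" (cosine) classifier head.
def _demo_normed_linear():
    layer = NormedLinear(4, 4)
    out = layer(torch.rand(4, 4))
    assert out.shape == (4, 4)
    assert bool((out.abs() <= 1.0 + 1e-06).all())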
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
return buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class NormedLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinearNew, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
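# --- Illustrative parity sketch (not part of the original record) ---
# Note the argument mapping: the wrapper passes self.weight as primals_1,
# which call() row-normalizes (the eager code row-normalizes x instead),
# while the input is column-normalized. The GPU-only check below mirrors
# what the compiled code actually computes, including F.normalize's
# max(norm, 1e-12) clamp.
def _check_normed_linear_parity():
    if not torch.cuda.is_available():
        return
    m = NormedLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, device='cuda')
    wn = m.weight / m.weight.norm(dim=1, keepdim=True).clamp_min(1e-12)
    xn = x / x.norm(dim=0, keepdim=True).clamp_min(1e-12)
    assert torch.allclose(m(x), wn.mm(xn), atol=1e-06)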
| IssacCyj/imbalanced-semi-self | NormedLinear | false | 2,397 | [
"MIT"
] | 0 | 33ef166532c94c7ac65b41238c751b0a5369262b | https://github.com/IssacCyj/imbalanced-semi-self/tree/33ef166532c94c7ac65b41238c751b0a5369262b | import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/s5/cs5wshnrdka3xma3btqijhothwpkw4ctmtyvsdzkv6seotnt4jpf.py
# Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# cross_entropy => exp, log, mul, neg, sub_1, sum_1, sum_2
# loss => mul_1
# mean => mean
# neg => neg_1
# p => exp_1
# pow_1 => pow_1
# sub => sub_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def focal_loss(input_values, gamma):
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
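# --- Illustrative usage sketch (not part of the original record) ---
# With gamma = 0 the modulating factor (1 - p) ** gamma is identically 1,
# so the focal loss collapses to plain mean cross-entropy. The check below
# assumes a PyTorch version whose F.cross_entropy accepts soft
# (probability) targets, as the 4D float target from get_inputs() implies.
def _check_focal_gamma_zero():
    x, t = get_inputs()
    assert torch.allclose(FocalLoss(gamma=0.0)(x, t), F.cross_entropy(x, t))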
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead index math left by the cleanup pass; result unused
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # always-true mask, likewise unused
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
    tmp30 - tmp29  # dead (1 - p) term: gamma == 0 folded the focal modulator to 1
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
def focal_loss(input_values, gamma):
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
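# --- Illustrative parity sketch (not part of the original record) ---
# gamma = 0 was constant-folded: the bare `tmp30 - tmp29` above computes
# (1 - p) but its result is discarded, leaving plain mean cross-entropy.
# Note also that the kernels log-softmax the *second* tensor (arg1_1) and
# treat the first as the soft target, so the eager reference below swaps
# the arguments accordingly (GPU only; soft-target cross_entropy assumed).
def _check_focal_parity():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    eager = torch.nn.functional.cross_entropy(b, a)
    assert torch.allclose(FocalLossNew()(a, b), eager)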
| IssacCyj/imbalanced-semi-self | FocalLoss | false | 2,398 | [
"MIT"
] | 0 | 33ef166532c94c7ac65b41238c751b0a5369262b | https://github.com/IssacCyj/imbalanced-semi-self/tree/33ef166532c94c7ac65b41238c751b0a5369262b | import torch
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def focal_loss(input_values, gamma):
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class Model(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super().__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
L2Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/za/czawwvv26coouabk4n6w7mpraun44c2xseki6ksot4t5xm4gt4xm.py
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {})
triton_per_fused_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.00390625
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch as th
from functools import *
class L2Loss(nn.Module):
def __init__(self):
super(L2Loss, self).__init__()
def forward(self, grad_fake, grad_real):
num_pixels = reduce(lambda x, y: x * y, grad_real.size())
return th.sum(th.pow(grad_real - grad_fake, 2)) / num_pixels
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
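# --- Illustrative usage sketch (not part of the original record) ---
# Summing the squared differences and dividing by the element count is
# exactly mean-squared error, so L2Loss should match F.mse_loss with its
# default 'mean' reduction (the squaring makes the operand order moot).
def _check_l2_loss():
    import torch.nn.functional as F
    a, b = get_inputs()
    assert torch.allclose(L2Loss()(a, b), F.mse_loss(a, b))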
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from functools import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)  # dead index/mask expressions left by the cleanup pass
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.00390625
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L2LossNew(nn.Module):
def __init__(self):
super(L2LossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
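# --- Illustrative parity sketch (not part of the original record) ---
# The 0.00390625 constant in the kernel is the precomputed 1/256 for the
# fixed (4, 4, 4, 4) shapes; on a GPU the fused kernel should agree with
# eager mean-squared error.
def _check_l2_parity():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(L2LossNew()(a, b), ((a - b) ** 2).mean())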
| JaviBite/TFG | L2Loss | false | 2,399 | [
"MIT"
] | 0 | e406580697132f53b63a7c983daaa098af45b52c | https://github.com/JaviBite/TFG/tree/e406580697132f53b63a7c983daaa098af45b52c | import torch
from torch import nn
import torch as th
from functools import *
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, grad_fake, grad_real):
num_pixels = reduce(lambda x, y: x * y, grad_real.size())
return th.sum(th.pow(grad_real - grad_fake, 2)) / num_pixels
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SphereMSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yz/cyz3o572ukqgm2jqaxtxqylj5b3ytu5rg2zcfv2ntpoy4zhkdxal.py
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mul => mul
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_per_fused_div_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 16
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r2), None)
tmp4 = tl.load(in_ptr2 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from numpy import pi
from torch import nn
from torch.nn.parameter import Parameter
import torch as th
from torch.nn import Parameter
from functools import *
class SphereMSE(nn.Module):
def __init__(self, h, w):
super(SphereMSE, self).__init__()
self.h, self.w = h, w
weight = th.zeros(1, 1, h, w)
theta_range = th.linspace(0, pi, steps=h + 1)
dtheta = pi / h
dphi = 2 * pi / w
for theta_idx in range(h):
            weight[:, :, theta_idx, :] = dphi * (math.sin(theta_range[theta_idx]) +
                math.sin(theta_range[theta_idx + 1])) / 2 * dtheta
self.weight = Parameter(weight, requires_grad=False)
def forward(self, out, target):
return th.sum((out - target) ** 2 * self.weight) / out.size(0)
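# Note (sketch): each weight entry is the solid angle of its latitude band,
# dphi * dtheta * (sin(theta_i) + sin(theta_{i+1})) / 2, so the loss is a
# solid-angle-weighted squared error summed over the sphere and averaged
# over the batch dimension.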
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h': 4, 'w': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from numpy import pi
from torch import nn
from torch.nn.parameter import Parameter
import torch as th
from torch.nn import Parameter
from functools import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 16
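    # r2 walks all 256 elements of the (4, 4, 4, 4) inputs; r0 = r2 % 16 indexes the broadcast (1, 1, 4, 4) weight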
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r2, None)
tmp4 = tl.load(in_ptr2 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class SphereMSENew(nn.Module):
def __init__(self, h, w):
super(SphereMSENew, self).__init__()
self.h, self.w = h, w
weight = th.zeros(1, 1, h, w)
theta_range = th.linspace(0, pi, steps=h + 1)
dtheta = pi / h
dphi = 2 * pi / w
for theta_idx in range(h):
            weight[:, :, theta_idx, :] = dphi * (math.sin(theta_range[theta_idx]) +
                math.sin(theta_range[theta_idx + 1])) / 2 * dtheta
self.weight = Parameter(weight, requires_grad=False)
def forward(self, input_0, input_1):
arg2_1 = self.weight
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| JaviBite/TFG | SphereMSE | false | 2,400 | [
"MIT"
] | 0 | e406580697132f53b63a7c983daaa098af45b52c | https://github.com/JaviBite/TFG/tree/e406580697132f53b63a7c983daaa098af45b52c | import math
import torch
from numpy import pi
from torch import nn
from torch.nn.parameter import Parameter
import torch as th
from torch.nn import Parameter
from functools import *
class Model(nn.Module):
def __init__(self, h, w):
super().__init__()
self.h, self.w = h, w
weight = th.zeros(1, 1, h, w)
theta_range = th.linspace(0, pi, steps=h + 1)
dtheta = pi / h
dphi = 2 * pi / w
for theta_idx in range(h):
            weight[:, :, theta_idx, :] = dphi * (math.sin(theta_range[theta_idx]) +
                math.sin(theta_range[theta_idx + 1])) / 2 * dtheta
self.weight = Parameter(weight, requires_grad=False)
def forward(self, out, target):
return th.sum((out - target) ** 2 * self.weight) / out.size(0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
UniformBoxWarp | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2i/c2if2yhrux7a2hf5x6cptfa3iexq65bs7d3zjjyatstrnnog3cdz.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class UniformBoxWarp(nn.Module):
def __init__(self, sidelength):
super().__init__()
self.scale_factor = 2 / sidelength
def forward(self, coordinates):
return coordinates * self.scale_factor
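# Note (sketch): with sidelength L, scale_factor = 2 / L maps coordinates from
# [-L/2, L/2] onto the canonical cube [-1, 1]; here L = 4, so the compiled
# kernel reduces to a single multiply by 0.5.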
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sidelength': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
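    # 0.5 is the constant-folded scale_factor = 2 / sidelength with sidelength = 4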
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class UniformBoxWarpNew(nn.Module):
def __init__(self, sidelength):
super().__init__()
self.scale_factor = 2 / sidelength
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HexagonPrime/pixel-nerf | UniformBoxWarp | false | 2,401 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, sidelength):
super().__init__()
self.scale_factor = 2 / sidelength
def forward(self, coordinates):
return coordinates * self.scale_factor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sp/cspqqt75qs7ffrb3lysy45iuc7wyhwgdjk7rscety2hozovgu3iw.py
# Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_2 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jo/cjooyf2taupk6b3rhpvd4u5im6tyfn25cyirn5yix7vtprzujjxg.py
# Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_2 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ii/ciibxqsyzwu4mgbmyht7liy2wshsevl3nzbgoqgw33bbcrrvlnxj.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output_3 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_15, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_3 = async_compile.triton('triton_poi_fused_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yd/cydu4qjvgfymhhpyhhqxb3snu5mgfygbkhn2nemqx5nozidar6fc.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output_3 => add, add_1, mul_1, mul_2, rsqrt, sub_1, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_15, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_8), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
triton_poi_fused_native_layer_norm_4 = async_compile.triton('triton_poi_fused_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
buf3 = reinterpret_tensor(buf0, (16, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf3, 64, 4, grid=grid(64, 4), stream=stream0)
buf4 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 1024, grid=grid(1024), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 1024, grid=grid(1024), stream=stream0)
del buf5
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf7)
del primals_5
buf8 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf7, buf8, 64, 4, grid=grid(64, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (64, 4, 1), (4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf9, buf10, 64, 4, grid=grid(64, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_3.run(buf11, buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_4.run(buf11, buf12, buf13, primals_8, primals_9, buf14, 256, grid=grid(256), stream=stream0)
del buf12
del buf13
del primals_9
return (buf14, reinterpret_tensor(buf6, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), buf11, primals_6, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class MultiHeadAttention(nn.Module):
    def __init__(self, in_dim, out_dim, out_heads, relation_dim=0,
        residual=False, projection=True, layer_norm=True):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.out_heads = out_heads
self.relation_dim = relation_dim
assert self.out_dim % self.out_heads == 0
self.query_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.key_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.value_layer = nn.Linear(self.in_dim, self.out_dim, bias=False)
self.residual = residual
self.projection = projection
if self.projection:
self.proj_layer = nn.Linear(self.out_dim, self.out_dim)
self.layer_norm = layer_norm
if self.layer_norm:
self.ln = nn.LayerNorm(self.out_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.query_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.key_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.value_layer.weight, -0.1, 0.1)
if self.projection:
nn.init.uniform_(self.proj_layer.weight, -0.1, 0.1)
def forward(self, query, key, relation=None, mask=None, key_mask=None,
distance=None):
"""
Args:
query (torch.Tensor): [batch, query_len, in_dim]
key (torch.Tensor): [batch, key_len, in_dim]
relation (torch.Tensor): [batch, query_len, key_len, relation_dim]
mask (torch.Tensor): [batch, query_len]
key_mask (torch.Tensor): [batch, key_len]
        Returns:
            output (torch.Tensor): [batch, query_len, out_dim]
            attention (torch.Tensor): detached attention weights,
                [batch, out_heads, query_len, key_len]
        """
query_len = query.size(-2)
key_len = key.size(-2)
head_dim = self.out_dim // self.out_heads
if key_mask is None:
if torch.equal(query, key):
key_mask = mask
if relation is not None:
relation = relation.view(-1, query_len, key_len, self.relation_dim)
query_ = query.view(-1, query_len, 1, self.in_dim).repeat(1, 1,
key_len, 1)
query_ = torch.cat([query_, relation], dim=-1)
key_ = key.view(-1, 1, key_len, self.in_dim).repeat(1,
query_len, 1, 1)
key_ = torch.cat([key_, relation], dim=-1)
Q = self.query_layer(query_).view(-1, query_len * key_len, self
.out_heads, head_dim)
K = self.key_layer(key_).view(-1, query_len * key_len, self.
out_heads, head_dim)
Q = Q.transpose(1, 2).contiguous().view(-1, query_len, key_len,
head_dim)
K = K.transpose(1, 2).contiguous().view(-1, query_len, key_len,
head_dim)
attention = (Q * K).sum(dim=-1)
else:
Q = self.query_layer(query).view(-1, query_len, self.out_heads,
head_dim)
K = self.key_layer(key).view(-1, key_len, self.out_heads, head_dim)
Q = Q.transpose(1, 2).contiguous().view(-1, query_len, head_dim)
K = K.transpose(1, 2).contiguous().view(-1, key_len, head_dim)
attention = torch.bmm(Q, K.transpose(1, 2))
if distance is not None:
attention = attention - torch.log1p(distance.repeat(self.
out_heads, 1, 1))
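        # scaled dot-product attention: multiply logits by 1 / sqrt(head_dim) to stabilize the softmax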
attention = attention * float(head_dim) ** -0.5
if key_mask is not None:
attention = attention.view(-1, self.out_heads, query_len, key_len)
attention = attention + ((1 - key_mask) * -1e+32).view(-1, 1, 1,
key_len)
attention = F.softmax(attention, dim=-1)
if mask is not None:
attention = attention * mask.view(-1, 1, query_len, 1)
attention = attention.contiguous().view(-1, query_len, key_len)
V = self.value_layer(key).view(-1, key_len, self.out_heads, head_dim)
V = V.transpose(1, 2).contiguous().view(-1, key_len, head_dim)
output = torch.bmm(attention, V).view(-1, self.out_heads, query_len,
head_dim)
output = output.transpose(1, 2).contiguous().view(*query.size()[:-2
], query_len, self.out_dim)
if self.projection:
output = self.proj_layer(output)
if self.residual:
output = output + query
if self.layer_norm:
output = self.ln(output)
if mask is not None:
output = output * mask.unsqueeze(-1)
attention = attention.view(*query.size()[:-2], self.out_heads,
query_len, key_len).detach()
return output, attention
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'out_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
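# Together, _softmax_1 (row max, subtract, exp) and _softmax_2 (row sum,
# divide) implement a numerically stable two-pass softmax over rows of 4.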
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
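# _native_layer_norm_3 emits the per-row mean and rsqrt(var + 1e-05);
# _native_layer_norm_4 below applies (x - mean) * rsqrt * weight + bias.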
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf0, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf0, (16, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf3, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf3, (64, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(1024)](buf4, buf5, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(1024)](buf5, buf6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf5
buf7 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf7)
del primals_5
buf8 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_0[grid(64, 4)](buf7, buf8, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (64, 4, 1), (4, 1, 1), 0)
del buf7
extern_kernels.bmm(buf6, reinterpret_tensor(buf8, (64, 4, 1), (4, 1,
0), 0), out=buf9)
buf10 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_0[grid(64, 4)](buf9, buf10, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_3[grid(64)](buf11, buf12, buf13,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_4[grid(256)](buf11, buf12, buf13,
primals_8, primals_9, buf14, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf12
del buf13
del primals_9
return buf14, reinterpret_tensor(buf6, (4, 4, 4, 4, 4), (256, 64, 16, 4,
1), 0), primals_8, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf10, (64, 4), (4, 1), 0
), buf11, primals_6, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 1), 0)
class MultiHeadAttentionNew(nn.Module):
    def __init__(self, in_dim, out_dim, out_heads, relation_dim=0,
        residual=False, projection=True, layer_norm=True):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.out_heads = out_heads
self.relation_dim = relation_dim
assert self.out_dim % self.out_heads == 0
self.query_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.key_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.value_layer = nn.Linear(self.in_dim, self.out_dim, bias=False)
self.residual = residual
self.projection = projection
if self.projection:
self.proj_layer = nn.Linear(self.out_dim, self.out_dim)
self.layer_norm = layer_norm
if self.layer_norm:
self.ln = nn.LayerNorm(self.out_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.query_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.key_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.value_layer.weight, -0.1, 0.1)
if self.projection:
nn.init.uniform_(self.proj_layer.weight, -0.1, 0.1)
def forward(self, input_0, input_1):
primals_1 = self.query_layer.weight
primals_3 = self.key_layer.weight
primals_5 = self.value_layer.weight
primals_6 = self.proj_layer.weight
primals_7 = self.proj_layer.bias
primals_8 = self.ln.weight
primals_9 = self.ln.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| Hcnaeg/DI-engine | MultiHeadAttention | false | 2,402 | [
"Apache-2.0"
] | 0 | aba0c629f87649854091e9e59d948f83962e3e1e | https://github.com/Hcnaeg/DI-engine/tree/aba0c629f87649854091e9e59d948f83962e3e1e | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
    def __init__(self, in_dim, out_dim, out_heads, relation_dim=0,
        residual=False, projection=True, layer_norm=True):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.out_heads = out_heads
self.relation_dim = relation_dim
assert self.out_dim % self.out_heads == 0
self.query_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.key_layer = nn.Linear(self.in_dim + self.relation_dim, self.
out_dim, bias=False)
self.value_layer = nn.Linear(self.in_dim, self.out_dim, bias=False)
self.residual = residual
self.projection = projection
if self.projection:
self.proj_layer = nn.Linear(self.out_dim, self.out_dim)
self.layer_norm = layer_norm
if self.layer_norm:
self.ln = nn.LayerNorm(self.out_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.query_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.key_layer.weight, -0.1, 0.1)
nn.init.uniform_(self.value_layer.weight, -0.1, 0.1)
if self.projection:
nn.init.uniform_(self.proj_layer.weight, -0.1, 0.1)
def forward(self, query, key, relation=None, mask=None, key_mask=None,
distance=None):
"""
Args:
query (torch.Tensor): [batch, query_len, in_dim]
key (torch.Tensor): [batch, key_len, in_dim]
relation (torch.Tensor): [batch, query_len, key_len, relation_dim]
mask (torch.Tensor): [batch, query_len]
key_mask (torch.Tensor): [batch, key_len]
        Returns:
            output (torch.Tensor): [batch, query_len, out_dim]
            attention (torch.Tensor): detached attention weights,
                [batch, out_heads, query_len, key_len]
        """
query_len = query.size(-2)
key_len = key.size(-2)
head_dim = self.out_dim // self.out_heads
if key_mask is None:
if torch.equal(query, key):
key_mask = mask
if relation is not None:
relation = relation.view(-1, query_len, key_len, self.relation_dim)
query_ = query.view(-1, query_len, 1, self.in_dim).repeat(1, 1,
key_len, 1)
query_ = torch.cat([query_, relation], dim=-1)
key_ = key.view(-1, 1, key_len, self.in_dim).repeat(1,
query_len, 1, 1)
key_ = torch.cat([key_, relation], dim=-1)
Q = self.query_layer(query_).view(-1, query_len * key_len, self
.out_heads, head_dim)
K = self.key_layer(key_).view(-1, query_len * key_len, self.
out_heads, head_dim)
Q = Q.transpose(1, 2).contiguous().view(-1, query_len, key_len,
head_dim)
K = K.transpose(1, 2).contiguous().view(-1, query_len, key_len,
head_dim)
attention = (Q * K).sum(dim=-1)
else:
Q = self.query_layer(query).view(-1, query_len, self.out_heads,
head_dim)
K = self.key_layer(key).view(-1, key_len, self.out_heads, head_dim)
Q = Q.transpose(1, 2).contiguous().view(-1, query_len, head_dim)
K = K.transpose(1, 2).contiguous().view(-1, key_len, head_dim)
attention = torch.bmm(Q, K.transpose(1, 2))
if distance is not None:
attention = attention - torch.log1p(distance.repeat(self.
out_heads, 1, 1))
attention = attention * float(head_dim) ** -0.5
if key_mask is not None:
attention = attention.view(-1, self.out_heads, query_len, key_len)
attention = attention + ((1 - key_mask) * -1e+32).view(-1, 1, 1,
key_len)
attention = F.softmax(attention, dim=-1)
if mask is not None:
attention = attention * mask.view(-1, 1, query_len, 1)
attention = attention.contiguous
# ... truncated (>4000 chars) for memory efficiency |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
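    # tmp6 marks positions where the ReLU output is zero; autograd's threshold_backward uses this mask to zero gradients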
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
    def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'mid_size': 4, 'out_size': 4}]
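# Hedged usage sketch: wiring the two helpers above into the module by hand.
# Only names defined in this file are used; the printed shape follows from
# in_size == out_size == 4.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    mlp = MLP(*init_args, **init_kwargs)
    out = mlp(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 4])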
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
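    # In-place bias-add + ReLU on the matmul output; also emits the
    # (activation <= 0) mask that aten.threshold_backward reuses in autograd.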
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
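    # Contiguous copy materializing the (64, 4) view of the activation; for
    # these sizes the gather index reduces to an identity copy (see the note
    # after the original kernel above).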
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLPNew(nn.Module):
    def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu=True):
super(MLPNew, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, input_0):
primals_1 = self.fc.linear.weight
primals_2 = self.fc.linear.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
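# Hedged parity sketch (assumes a CUDA device, which the compiled call()
# requires): with the default dropout_r=0.0 the FC branch is deterministic,
# so MLPNew should match the eager FC -> Linear composition on the same
# parameters.
def _check_mlp_parity():
    torch.manual_seed(0)
    model = MLPNew(4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = model.linear(model.fc(x))
    assert torch.allclose(model(x), eager, atol=1e-6)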
| JayZhu0104/openvqa | MLP | false | 2,403 | [
"Apache-2.0"
] | 0 | cc2a92dccb08fb87506d5d0dede7dcfa3a1997aa | https://github.com/JayZhu0104/openvqa/tree/cc2a92dccb08fb87506d5d0dede7dcfa3a1997aa | import torch
import torch.nn as nn
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super().__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class Model(nn.Module):
    def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu=True):
super().__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
RelPositionMultiHeadedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/st/cstdkeotatufdikalo45nsknyfku4ofvhlziami6ybawevy72lal.py
# Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_ac => clone
# matrix_bd => clone_2
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_ac => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_bd => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u3/cu3opkmzrgckmrk64elyf7ml2a2gq2grpcbtiejtq54tloftwrmr.py
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# mask => eq
# Graph fragment:
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_3 = async_compile.triton('triton_poi_fused_eq_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/el/celmnq4cmthcneo2qgxp66qy3keynodxqjqu5pn2ewxs4o4pk4pv.py
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# add_2 => add_2
# scores => div
# scores_1 => full_default, where
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_div_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (4*x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float("-inf")
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp4
tmp13 = tl.where(tmp8, tmp6, tmp12)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp4
tmp20 = tl.where(tmp15, tmp6, tmp19)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp4
tmp27 = tl.where(tmp22, tmp6, tmp26)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + (x3), tmp28, xmask)
tl.store(out_ptr1 + (x3), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fn/cfnmi7pncqelvz56byxmokejmd37qckxv3wfecynqkl3e6gmjuq3.py
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# add_2 => add_2
# attn => full_default_1, where_1
# scores => div
# scores_1 => full_default, where
# softmax => div_1, exp, sub
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {})
triton_poi_fused__softmax_add_div_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x5), xmask)
tmp2 = tl.load(in_ptr1 + (x5), xmask)
tmp8 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x6), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float("-inf")
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = 0.0
tmp14 = tl.where(tmp0, tmp13, tmp12)
tl.store(in_out_ptr0 + (x5), tmp12, xmask)
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
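# Eager-mode reference sketch (illustrative, not generated code) of what the
# two softmax kernels above jointly compute: sum the two score matrices,
# scale by 1/sqrt(d_k) (d_k == 1 here, hence the literal 1.0 multiplier),
# mask with -inf, apply a numerically stable softmax, then zero the masked
# positions again.
def _reference_masked_softmax(matrix_ac, matrix_bd, mask_eq0):
    scores = (matrix_ac + matrix_bd) / 1.0
    scores = scores.masked_fill(mask_eq0, float('-inf'))
    attn = torch.softmax(scores, dim=-1)
    return attn.masked_fill(mask_eq0, 0.0)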
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3)
del primals_11
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_12
del primals_13
del primals_3
buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
triton_poi_fused_eq_3.run(primals_14, buf10, 64, grid=grid(64), stream=stream0)
del primals_14
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf3 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_add_div_masked_fill_4.run(buf10, buf6, buf9, buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_add_div_masked_fill_5.run(buf13, buf10, buf9, buf11, buf12, buf14, 256, grid=grid(256), stream=stream0)
del buf9
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_8, buf15, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16)
buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf16, buf17, 16, 4, grid=grid(16, 4), stream=stream0)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18)
del primals_16
return (reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
                1. When applying cross-attention between decoder and encoder,
                   the batch padding mask for the input is of shape (#batch, 1, T).
                2. When applying self-attention in the encoder,
                   the mask is of shape (#batch, T, T).
                3. When applying self-attention in the decoder,
                   the mask is of shape (#batch, L, L).
                4. If different decoder positions attend to different blocks
                   of the encoder, as in MoChA, the passed-in mask could be of
                   shape (#batch, L, T). There is no such case in current WeNet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
        x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1,
            x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
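        # Note: the pad/view/slice above is the Transformer-XL relative
        # shift. It moves row i of the last dimension left by i, so that
        # out[..., i, j] picks up x[..., i, j - i + (time2 - 1)], i.e.
        # absolute position indexing becomes relative-distance indexing.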
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'
):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
pos_emb (torch.Tensor): Positional embedding tensor
(#batch, time2, size).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
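# Hedged reference sketch of the bias-augmented score decomposition used in
# the forward above (shapes follow get_inputs(): batch=4, time=4, n_feat=4,
# so h=4 and d_k=1):
def rel_scores(q, k, p, pos_bias_u, pos_bias_v, d_k):
    # q, k, p: (batch, head, time, d_k); pos_bias_*: (head, d_k)
    q_u = (q.transpose(1, 2) + pos_bias_u).transpose(1, 2)
    q_v = (q.transpose(1, 2) + pos_bias_v).transpose(1, 2)
    matrix_ac = torch.matmul(q_u, k.transpose(-2, -1))
    matrix_bd = torch.matmul(q_v, p.transpose(-2, -1))
    return (matrix_ac + matrix_bd) / math.sqrt(d_k)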
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from typing import Optional
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
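    # Fuses the q-projection bias with pos_bias_u / pos_bias_v, writing the
    # two biased, head-major copies (q_with_bias_u, q_with_bias_v) in one pass.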
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
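    # Builds the boolean padding mask: True wherever the attention mask is 0.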
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
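    # Softmax pass 1: rebuild the masked scores for each row of 4 keys and
    # reduce them to the row maximum and the sum of exponentials.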
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp4
tmp13 = tl.where(tmp8, tmp6, tmp12)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp4
tmp20 = tl.where(tmp15, tmp6, tmp19)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp4
tmp27 = tl.where(tmp22, tmp6, tmp26)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x3, tmp28, xmask)
tl.store(out_ptr1 + x3, tmp39, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
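    # Softmax pass 2: normalize with the precomputed row max/sum, keep the
    # raw probabilities in-place for backward, and write a re-masked copy
    # (zeros at padded positions) for the forward output.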
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp2 = tl.load(in_ptr1 + x5, xmask)
tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = 0.0
tmp14 = tl.where(tmp0, tmp13, tmp12)
tl.store(in_out_ptr0 + x5, tmp12, xmask)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3)
del primals_11
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, primals_12,
primals_13, buf4, buf7, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1,
num_stages=1)
del primals_12
del primals_13
del primals_3
buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_3[grid(64)](primals_14, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_14
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_4[grid(64)](buf10,
buf6, buf9, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_5[grid(256)](buf13,
buf10, buf9, buf11, buf12, buf14, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf9
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf15, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16)
buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf11
triton_poi_fused_clone_2[grid(16, 4)](buf16, buf17, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0)
del buf16
extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_16
return reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
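    # Note on the masking above: masked positions are first filled with -inf so
    # the softmax assigns them ~0 probability, then masked_fill(mask, 0.0) is
    # applied again *after* the softmax so that rows in which every position is
    # masked produce exact zeros instead of NaNs (softmax over all -inf).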
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
                1. When applying cross attention between decoder and encoder,
                the batch padding mask for input is in (#batch, 1, T) shape.
                2. When applying self attention of encoder,
                the mask is in (#batch, T, T) shape.
                3. When applying self attention of decoder,
                the mask is in (#batch, L, L) shape.
                4. If different positions in the decoder see different blocks
                of the encoder, such as Mocha, the passed-in mask could be
                in (#batch, L, T) shape. But there is no such case in current
                Wenet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
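# A minimal usage sketch of MultiHeadedAttention (illustrative only; the
# shapes and the all-ones mask below are assumptions, not part of this module):
def _demo_multi_headed_attention():
    mha = MultiHeadedAttention(n_head=4, n_feat=64, dropout_rate=0.1)
    x = torch.randn(2, 10, 64)  # (#batch, time, size)
    mask = torch.ones(2, 1, 10, dtype=torch.bool)  # (#batch, 1, time2)
    out = mha(x, x, x, mask)  # self-attention
    assert out.shape == (2, 10, 64)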
class RelPositionMultiHeadedAttentionNew(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x
.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
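    # How the shift works: after left-padding one zero column, viewing the
    # buffer with the last two dims swapped rotates row i of the score matrix
    # by i positions, so entry (i, j) comes to hold the score for relative
    # offset j - i. Slicing off the first row and calling view_as(x) restores
    # the original shape.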
def forward(self, input_0, input_1, input_2, input_3, input_4):
primals_12 = self.pos_bias_u
primals_13 = self.pos_bias_v
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_11 = self.linear_out.weight
primals_16 = self.linear_out.bias
primals_15 = self.linear_pos.weight
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
primals_14 = input_4
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
| InfluencerNGZK/wenet | RelPositionMultiHeadedAttention | false | 2,405 | [
"Apache-2.0"
] | 0 | 9a3c7f70a78ce675f5e013b1f67a06d1d23fba3e | https://github.com/InfluencerNGZK/wenet/tree/9a3c7f70a78ce675f5e013b1f67a06d1d23fba3e | import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
                1. When applying cross attention between decoder and encoder,
                the batch padding mask for input is in (#batch, 1, T) shape.
                2. When applying self attention of encoder,
                the mask is in (#batch, T, T) shape.
                3. When applying self attention
# ... truncated (>4000 chars) for memory efficiency |
BahdanauAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fo/cfokv5mhjtrq5wugzlwfb7neo7lma2qe4v72mqif34incoecopkz.py
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
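# The kernel above only accumulates sum(v * v), i.e. ||v||^2; the sqrt that
# completes torch.norm(self.v) is fused into the softmax kernel further down
# (the libdevice.sqrt applied to tmp5).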
# kernel path: runs/run_shard_7/inductor_cache/n4/cn4tc22cfrubj7ikntlgvo2zigcqyb4exwc5hxvpzcneh4ztsn7l.py
# Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_7), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/by/cbyk55yox2xs65bp7r2r6lyxsibdiqng2w7afs2wvc4dl67gwtfi.py
# Topologically Sorted Source Nodes: [mul, norm, normed_v, mul_1, attn_scores, attn_scores_1], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.div, aten.sum, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => sum_2
# attn_scores_1 => amax, div_1, exp, sub, sum_3
# mul => mul
# mul_1 => mul_1
# norm => pow_2
# normed_v => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %primals_6), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %pow_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %tanh), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_2, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2 = async_compile.triton('triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex % 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x4 + (64*x2)), xmask)
tmp10 = tl.load(in_ptr3 + (16 + x4 + (64*x2)), xmask)
tmp13 = tl.load(in_ptr3 + (32 + x4 + (64*x2)), xmask)
tmp16 = tl.load(in_ptr3 + (48 + x4 + (64*x2)), xmask)
tmp3 = tmp1 * tmp2
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp3 / tmp6
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp18 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 / tmp20
tl.store(in_out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')
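# Note: tmp19 = tmp18 - tmp18 and tmp21 = tmp20 / tmp20 are not leftover dead
# code; they are the traced F.softmax(attn_scores, dim=0) where the leading
# dim has size 1 (projected_query was unsqueeze(0)'d), so the max-subtraction
# and the normalizing sum each see a single element and the result folds to 1.0.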
# kernel path: runs/run_shard_7/inductor_cache/nu/cnub5y6hoo5eqx4ansihvpe6lypvv3sdp7muydbgzcmc6izyixnc.py
# Topologically Sorted Source Nodes: [mul_2, context], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context => sum_4
# mul_2 => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %primals_4), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [0]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
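# This kernel realizes (attn_scores.unsqueeze(2) * value): with the traced
# size-1 leading dim, the .sum(dim=0) that forms the context vector reduces
# over a single element, so the fusion compiles to a pointwise broadcast
# multiply with no reduction.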
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [key], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0.run(primals_6, buf2, 1, 4, grid=grid(1), stream=stream0)
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf3, buf1, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf4 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul, norm, normed_v, mul_1, attn_scores, attn_scores_1], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.div, aten.sum, aten._softmax]
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2.run(buf5, primals_5, primals_6, buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul_2, context], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf5, primals_4, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, buf5, primals_4, primals_5, primals_6, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class BahdanauAttention(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, query, value, key_padding_mask=None, state=None):
projected_query = self.query_proj(query).unsqueeze(0)
key = self.value_proj(value)
if self.normalize:
normed_v = self.g * self.v / torch.norm(self.v)
attn_scores = (normed_v * torch.tanh(projected_query + key +
self.b)).sum(dim=2)
else:
            attn_scores = (self.v * torch.tanh(projected_query + key)).sum(dim=2)  # sum over embed dim, matching the normalized branch
if key_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(key_padding_mask,
float('-inf')).type_as(attn_scores)
attn_scores = F.softmax(attn_scores, dim=0)
context = (attn_scores.unsqueeze(2) * value).sum(dim=0)
next_state = attn_scores
return context, attn_scores, next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'query_dim': 4, 'value_dim': 4, 'embed_dim': 4}]
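# A minimal usage sketch of BahdanauAttention (illustrative; the shapes below
# are assumptions about the layout this module expects, namely value as
# (src_len, batch, dim) with the softmax taken over dim 0):
def _demo_bahdanau_attention():
    attn = BahdanauAttention(query_dim=8, value_dim=8, embed_dim=8)
    query = torch.rand(4, 8)  # (batch, query_dim)
    value = torch.rand(6, 4, 8)  # (src_len, batch, value_dim)
    context, scores, state = attn(query, value)
    assert context.shape == (4, 8) and scores.shape == (6, 4)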
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from torch.nn import Parameter
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex % 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x4 + 64 * x2), xmask)
tmp10 = tl.load(in_ptr3 + (16 + x4 + 64 * x2), xmask)
tmp13 = tl.load(in_ptr3 + (32 + x4 + 64 * x2), xmask)
tmp16 = tl.load(in_ptr3 + (48 + x4 + 64 * x2), xmask)
tmp3 = tmp1 * tmp2
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp3 / tmp6
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp18 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 / tmp20
tl.store(in_out_ptr0 + x3, tmp21, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(1)](primals_6, buf2, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
triton_poi_fused_add_tanh_1[grid(256)](buf3, buf1, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf4 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2[grid(64)](
buf5, primals_5, primals_6, buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_sum_3[grid(256)](buf5, primals_4, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf6, buf5, primals_4, primals_5, primals_6, reinterpret_tensor(
primals_2, (64, 4), (4, 1), 0), buf3, buf5
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class BahdanauAttentionNew(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, input_0, input_1):
primals_6 = self.v
primals_7 = self.b
primals_5 = self.g
primals_1 = self.query_proj.weight
primals_3 = self.value_proj.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
| Fei00Wu/espresso | BahdanauAttention | false | 2,406 | [
"MIT"
] | 0 | 4e8e6e2f9151a87448845c5142611c103dd4580c | https://github.com/Fei00Wu/espresso/tree/4e8e6e2f9151a87448845c5142611c103dd4580c | import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import torch.optim.lr_scheduler
import torch.utils.data
import torch.onnx.operators
import torch.optim
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class Model(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, query, value, key_padding_mask=None, state=None):
projected_query = self.query_proj(query).unsqueeze(0)
key = self.value_proj(value)
if self.normalize:
normed_v = self.g * self.v / torch.norm(self.v)
attn_scores = (normed_v * torch.tanh(projected_query + key +
self.b)).sum(dim=2)
else:
            attn_scores = (self.v * torch.tanh(projected_query + key)).sum(dim=2)  # sum over embed dim, matching the normalized branch
if key_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(key_padding_mask,
float('-inf')).type_as(attn_scores)
attn_scores = F.softmax(attn_scores, dim=0)
context = (attn_scores.unsqueeze(2) * value).sum(dim=0)
next_state = attn_scores
return context, attn_scores, next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Project3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/32/c32ppse2vdmak5is2nuwq2vbmvddtxyrdxkeqrxyec7bhptha7aa.py
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# cam_points => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = (xindex // 48)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/p3/cp33b3l57w5jlxxjkbyxuiiw6erdw3sxbh2kc6ydjd74u3ubmxdx.py
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
# Source node to ATen node mapping:
# pix_coords_3 => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_16, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {})
triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + (48*x2)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + (x4), tmp31, xmask)
''', device_str='cuda')
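# The constants baked into the kernel above come from the traced shapes:
# 0.3333333333333333 is 1 / (width - 1) == 1 / (height - 1) for the 4x4 trace
# (the in-place /= on pix_coords), 1e-07 is the eps guard on the depth divide,
# and the final (x - 0.5) * 2 maps pixel coordinates into [-1, 1] for
# grid_sample-style sampling.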
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf1, 192, grid=grid(192), stream=stream0)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
triton_poi_fused_mul_sub_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from functools import *
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
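# A minimal usage sketch of Project3D (illustrative; the identity intrinsics
# and pose below are assumptions, not values the module prescribes):
def _demo_project3d():
    proj = Project3D(batch_size=2, height=4, width=4)
    points = torch.rand(2, 4, 16)  # homogeneous 3D points, (B, 4, H*W)
    K = torch.eye(4).expand(2, 4, 4)  # camera intrinsics (B, 4, 4)
    T = torch.eye(4).expand(2, 4, 4)  # camera pose (B, 4, 4)
    pix = proj(points, K, T)  # (B, H, W, 2), coordinates scaled to [-1, 1]
    assert pix.shape == (2, 4, 4, 2)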
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from functools import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = xindex // 48
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2
)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3,
class Project3DNew(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3DNew, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg2_1 = input_0
arg0_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| JaviBite/TFG | Project3D | false | 2,407 | [
"MIT"
] | 0 | e406580697132f53b63a7c983daaa098af45b52c | https://github.com/JaviBite/TFG/tree/e406580697132f53b63a7c983daaa098af45b52c | import torch
from torch import nn
from functools import *
class Model(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super().__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
GlobalAveragePooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
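# The constant 16.0 above is H * W (4 * 4) from the traced input shape: the
# kernel computes a per-(batch, channel) spatial sum over the 16 pixels and
# divides once at the end, which matches x.mean([2, 3]).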
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GlobalAveragePooling(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.mean([2, 3])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
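# Equivalence sketch (illustrative): the mean over dims [2, 3] is the same as
# a global adaptive average pool followed by a flatten.
def _demo_global_average_pooling():
    x = torch.rand(4, 4, 4, 4)
    gap = GlobalAveragePooling()
    ref = torch.nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)
    assert torch.allclose(gap(x), ref)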
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GlobalAveragePoolingNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HexagonPrime/pixel-nerf | GlobalAveragePooling | false | 2,409 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.mean([2, 3])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AdaAttN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# G => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/me/cme2xhikd4knej4b3abj5wsh6uynxnmukvcgl6zc2xyicp2oo34b.py
# Topologically Sorted Source Nodes: [style_flat, pow_1], Original ATen: [aten.clone, aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# style_flat => clone
# Graph fragment:
# %clone : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clone, 2), kwargs = {})
triton_poi_fused_clone_pow_1 = async_compile.triton('triton_poi_fused_clone_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_pow_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp3, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/62/c62nqqevmdtdp3wxfzxcohhsleqbn6wke7ldngntegm5lngsl572.py
# Topologically Sorted Source Nodes: [S_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/aa/caanywtivicqu7zjqwxiejx62isp2mvzd5fljrjur3cieqw4w3tk.py
# Topologically Sorted Source Nodes: [var, feat_var, sqrt_1, mean_2], Original ATen: [aten.var, aten.add, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# feat_var => add
# mean_2 => mean
# sqrt_1 => sqrt_1
# var => var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_5, [2]), kwargs = {correction: 1})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_5, [2]), kwargs = {})
triton_per_fused_add_mean_sqrt_var_3 = async_compile.triton('triton_per_fused_add_mean_sqrt_var_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_var_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sqrt_var_3(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp18 = tl.sum(tmp3, 1)[:, None]
tmp19 = 15.0
tmp20 = tmp16 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 16.0
tmp25 = tmp18 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp23, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hc/chcpozzanit2vza2jabxaja7at4redwwb6gn46vr7rmdhph6bypo.py
# Topologically Sorted Source Nodes: [mean_1, std_1, sub_1, normalized_feat, mul, add_1], Original ATen: [aten.clone, aten.sub, aten.div, aten.mul, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# mean_1 => clone_1
# mul => mul
# normalized_feat => div_1
# std_1 => clone_2
# sub_1 => sub_2
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_10, %expand), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %expand_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_2, %div_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %clone_1), kwargs = {})
triton_poi_fused_add_clone_div_mul_sub_4 = async_compile.triton('triton_poi_fused_add_clone_div_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_div_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 - tmp2
tmp4 = tl.full([1, 1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = libdevice.sqrt(tmp5)
tmp9 = tmp7 - tmp8
tmp11 = tmp9 / tmp10
tmp12 = tmp6 * tmp11
tmp13 = tmp12 + tmp1
tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp13, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [F], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [H], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [style_flat, pow_1], Original ATen: [aten.clone, aten.pow]
triton_poi_fused_clone_pow_1.run(buf2, primals_8, buf4, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_8
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [F], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf5, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [S], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [S_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf6, buf9, 64, 16, grid=grid(64), stream=stream0)
del buf6
buf10 = reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, buf4, out=buf10)
buf12 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, bmm_2], Original ATen: [aten.pow, aten.bmm]
extern_kernels.bmm(buf9, buf11, out=buf12)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf16 = buf14; del buf14 # reuse
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [var, feat_var, sqrt_1, mean_2], Original ATen: [aten.var, aten.add, aten.sqrt, aten.mean]
triton_per_fused_add_mean_sqrt_var_3.run(buf16, buf18, primals_10, 16, 16, grid=grid(16), stream=stream0)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_1, std_1, sub_1, normalized_feat, mul, add_1], Original ATen: [aten.clone, aten.sub, aten.div, aten.mul, aten.add]
triton_poi_fused_add_clone_div_mul_sub_4.run(buf12, buf10, primals_10, buf18, buf16, buf19, 64, 4, grid=grid(64, 4), stream=stream0)
return (buf19, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, buf4, buf9, buf10, buf12, reinterpret_tensor(buf16, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf11, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
def calc_mean_std(feat, eps=1e-05):
size = feat.size()
assert len(size) == 4
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
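# mean_variance_norm is per-channel instance normalization. A minimal numeric
# sketch, assuming the (4, 4, 4, 4) shapes from get_inputs below:
#   feat = torch.rand(4, 4, 4, 4)
#   out = mean_variance_norm(feat)
#   out.view(4, 4, -1).mean(dim=2)  # ~0 for every (N, C) pair
#   out.view(4, 4, -1).std(dim=2)   # ~1 for every (N, C) pair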
class AdaAttN(nn.Module):
def __init__(self, in_planes, max_sample=256 * 256, key_planes=None):
super(AdaAttN, self).__init__()
if key_planes is None:
key_planes = in_planes
self.f = nn.Conv2d(key_planes, key_planes, (1, 1))
self.g = nn.Conv2d(key_planes, key_planes, (1, 1))
self.h = nn.Conv2d(in_planes, in_planes, (1, 1))
self.sm = nn.Softmax(dim=-1)
self.max_sample = max_sample
def forward(self, content, style, content_key, style_key, seed=None):
F = self.f(content_key)
G = self.g(style_key)
H = self.h(style)
b, _, h_g, w_g = G.size()
G = G.view(b, -1, w_g * h_g).contiguous()
if w_g * h_g > self.max_sample:
if seed is not None:
torch.manual_seed(seed)
index = torch.randperm(w_g * h_g)[:self.max_sample]
G = G[:, :, index]
            style_flat = H.view(b, -1, w_g * h_g)[:, :, index]
            style_flat = style_flat.transpose(1, 2).contiguous()
else:
style_flat = H.view(b, -1, w_g * h_g).transpose(1, 2).contiguous()
b, _, h, w = F.size()
F = F.view(b, -1, w * h).permute(0, 2, 1)
S = torch.bmm(F, G)
S = self.sm(S)
mean = torch.bmm(S, style_flat)
std = torch.sqrt(torch.relu(torch.bmm(S, style_flat ** 2) - mean ** 2))
mean = mean.view(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
std = std.view(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
return std * mean_variance_norm(content) + mean
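# The std above relies on the identity Var[x] = E[x^2] - (E[x])^2 taken under
# the attention weights S; torch.relu guards the sqrt against small negative
# values caused by floating-point cancellation.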
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_pow_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp3, xmask & ymask)
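# The kernel below is a numerically stable softmax over each length-16 row of
# the (4, 16, 16) attention scores: subtract the row max, exponentiate, then
# divide by the row sum, matching nn.Softmax(dim=-1) in the eager module.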
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    # rmask elided: rnumel equals RBLOCK (16), so every reduction lane is in range
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
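# The kernel below fuses calc_mean_std for the content features: per (N, C)
# pair it reduces 16 spatial values to mean = sum / 16 and
# std = sqrt(var + 1e-05), with Bessel-corrected variance (divisor 15).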
@triton.jit
def triton_per_fused_add_mean_sqrt_var_3(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    # rmask elided: rnumel equals RBLOCK (16), so every reduction lane is in range
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp18 = tl.sum(tmp3, 1)[:, None]
tmp19 = 15.0
tmp20 = tmp16 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 16.0
tmp25 = tmp18 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp23, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
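# The epilogue kernel below assembles the output: attention std is recovered
# as sqrt(max(0, E[x^2] - mean^2)) (the eager relu becomes
# triton_helpers.maximum), the content is mean-variance normalized, and the
# result is std * normalized + mean.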
@triton.jit
def triton_poi_fused_add_clone_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + (x2 + 4 * y1), xmask & ymask,
        eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 - tmp2
tmp4 = tl.full([1, 1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = libdevice.sqrt(tmp5)
tmp9 = tmp7 - tmp8
tmp11 = tmp9 / tmp10
tmp12 = tmp6 * tmp11
tmp13 = tmp12 + tmp1
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp13, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1,
            stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = extern_kernels.convolution(primals_6, primals_4,
            stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = extern_kernels.convolution(primals_9, primals_7,
            stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused_clone_pow_1[grid(64, 4)](buf2, primals_8, buf4,
buf11, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf5 = buf0
del buf0
triton_poi_fused_convolution_0[grid(256)](buf5, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0),
            reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(64)](buf6, buf9, 64, 16, XBLOCK=32,
num_warps=4, num_stages=1)
del buf6
buf10 = reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0)
del buf2
extern_kernels.bmm(buf9, buf4, out=buf10)
buf12 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf9, buf11, out=buf12)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf16 = buf14
del buf14
buf18 = buf17
del buf17
triton_per_fused_add_mean_sqrt_var_3[grid(16)](buf16, buf18,
primals_10, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clone_div_mul_sub_4[grid(64, 4)](buf12, buf10,
primals_10, buf18, buf16, buf19, 64, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
    return (buf19, primals_1, primals_3, primals_4, primals_6, primals_7,
        primals_9, primals_10, buf4, buf9, buf10, buf12,
        reinterpret_tensor(buf16, (4, 4, 1, 1), (4, 1, 1, 1), 0),
        reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 1, 1), 0),
        reinterpret_tensor(buf11, (4, 4, 16), (64, 1, 4), 0),
        reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0),
        reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0))
def calc_mean_std(feat, eps=1e-05):
size = feat.size()
assert len(size) == 4
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
class AdaAttNNew(nn.Module):
def __init__(self, in_planes, max_sample=256 * 256, key_planes=None):
super(AdaAttNNew, self).__init__()
if key_planes is None:
key_planes = in_planes
self.f = nn.Conv2d(key_planes, key_planes, (1, 1))
self.g = nn.Conv2d(key_planes, key_planes, (1, 1))
self.h = nn.Conv2d(in_planes, in_planes, (1, 1))
self.sm = nn.Softmax(dim=-1)
self.max_sample = max_sample
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.f.weight
primals_2 = self.f.bias
primals_4 = self.g.weight
primals_5 = self.g.bias
primals_7 = self.h.weight
primals_8 = self.h.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| JerryLeolfl/AdaAttN | AdaAttN | false | 2,412 | [
"MIT"
] | 0 | 062c66f7818b344e3730ce9d6df7af03f9acb4f5 | https://github.com/JerryLeolfl/AdaAttN/tree/062c66f7818b344e3730ce9d6df7af03f9acb4f5 | import torch
import torch.utils.data
import torch
import torch.nn as nn
def calc_mean_std(feat, eps=1e-05):
size = feat.size()
assert len(size) == 4
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
class Model(nn.Module):
def __init__(self, in_planes, max_sample=256 * 256, key_planes=None):
super().__init__()
if key_planes is None:
key_planes = in_planes
self.f = nn.Conv2d(key_planes, key_planes, (1, 1))
self.g = nn.Conv2d(key_planes, key_planes, (1, 1))
self.h = nn.Conv2d(in_planes, in_planes, (1, 1))
self.sm = nn.Softmax(dim=-1)
self.max_sample = max_sample
def forward(self, content, style, content_key, style_key, seed=None):
F = self.f(content_key)
G = self.g(style_key)
H = self.h(style)
b, _, h_g, w_g = G.size()
G = G.view(b, -1, w_g * h_g).contiguous()
if w_g * h_g > self.max_sample:
if seed is not None:
torch.manual_seed(seed)
index = torch.randperm(w_g * h_g)[:self.max_sample]
G = G[:, :, index]
            style_flat = H.view(b, -1, w_g * h_g)[:, :, index]
            style_flat = style_flat.transpose(1, 2).contiguous()
else:
style_flat = H.view(b, -1, w_g * h_g).transpose(1, 2).contiguous()
b, _, h, w = F.size()
F = F.view(b, -1, w * h).permute(0, 2, 1)
S = torch.bmm(F, G)
S = self.sm(S)
mean = torch.bmm(S, style_flat)
std = torch.sqrt(torch.relu(torch.bmm(S, style_flat ** 2) - mean ** 2))
mean = mean.view(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
std = std.view(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
return std * mean_variance_norm(content) + mean
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
AddCoords | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nz/cnzr3enannjni75kec3qorz6jm6lyd5whz6u5l3ih55bgihwnb2u.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ret => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %device_put, %device_put_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 6
x3 = (xindex // 96)
x4 = xindex % 16
x1 = (xindex // 4) % 4
x0 = xindex % 4
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x1
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 6, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = x0
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x5), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AddCoords(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
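# Output sketch (illustrative): the result has C + 2 channels (C + 3 when
# with_r=True); the (4, 4, 4, 4) input from get_inputs below yields
# (4, 6, 4, 4), with the extra channels holding coordinates scaled to [-1, 1].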
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
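# Summary of the generated concatenation kernel below: channels 0-3 copy the
# input, while channels 4 and 5 hold coordinates normalized to [-1, 1] via
# 2 * (index / 3) - 1, varying along the two spatial axes (x1 and x0).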
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x5 = xindex
tmp0 = x2
    # lower-bound check (tmp0 >= 0) folded away: the channel index x2 is never negative
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x1
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
    # upper-bound check (tmp0 < 6) folded away: x2 only ranges over [0, 6)
tmp23 = x0
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x5, tmp31, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AddCoordsNew(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HexagonPrime/pixel-nerf | AddCoords | false | 2,413 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SSIM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kc/ckc5ftdtcxbcmqsxx57xrul2upk2ygczpppolriskw2ya5alzvp2.py
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
triton_poi_fused_mul_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_mul_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lo/clow3kzmb7jev264jm5hdsre6kicqfeascpf6ijglocrlea2sell.py
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
# Source node to ATen node mapping:
# SSIM_d => mul_6
# SSIM_n => mul_5
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# avg_pool2d_2 => avg_pool2d_2
# avg_pool2d_3 => avg_pool2d_3
# avg_pool2d_4 => avg_pool2d_4
# clamp => clamp_max, clamp_min
# mu_x => avg_pool2d
# mu_y => avg_pool2d_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# sigma_x => sub_8
# sigma_xy => sub_10
# sigma_y => sub_9
# sub_3 => sub_11
# truediv => div
# truediv_1 => div_1
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %avg_pool2d : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_1, [3, 3], [1, 1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 2), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %avg_pool2d_1 : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_3, [3, 3], [1, 1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %avg_pool2d_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 0.0001), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
# %avg_pool2d_4 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%mul, [3, 3], [1, 1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, %avg_pool2d_1), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_4, %mul_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0.0009), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0001), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_1, 2), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [3, 3], [1, 1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_2, %pow_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_3, 2), kwargs = {})
# %avg_pool2d_3 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_3, [3, 3], [1, 1]), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_3, %pow_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_8, %sub_9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 0.0009), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %add_5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_11, 2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_1, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1 = async_compile.triton('triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 27, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + (36*x2)), xmask)
tmp19 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp22 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp24 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp28 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp30 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp34 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp55 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp58 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp60 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp64 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp66 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp70 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + (x3), tmp118, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0.run(arg0_1, arg1_1, buf2, 576, grid=grid(576), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0; del buf0 # reuse
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1.run(buf7, buf2, arg0_1, arg1_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del buf2
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from functools import *
class SSIM(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
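# Minimal usage sketch, using the toy shapes from get_inputs(): the module
# returns a per-pixel dissimilarity map, (1 - SSIM) / 2, clamped to [0, 1],
# so comparing an image with itself yields values near 0.
if __name__ == "__main__":
    ssim = SSIM()
    x, y = get_inputs()
    print(ssim(x, x).max())  # ~0: identical images are maximally similar
    print(ssim(x, y).shape)  # torch.Size([4, 4, 4, 4]), values in [0, 1]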
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from functools import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + x3, tmp118, xmask)
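# Note on the fused kernel above: 0.1111111111111111 is 1/9, the averaging
# factor of the 3x3 AvgPool2d windows, so the nine shifted loads per input
# implement the pooling taps directly. The abs()-based address arithmetic
# folds the ReflectionPad2d(1) indexing into the same pass, so no padded
# intermediate is materialized for x or y themselves.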
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1,
buf2, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0
del buf0
buf7 = buf6
del buf6
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[
grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf2
return buf7,
class SSIMNew(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIMNew, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
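# Minimal smoke test, assuming a CUDA device is available (the kernels above
# are compiled for 'cuda'); the compiled module keeps the eager interface.
if __name__ == "__main__":
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = torch.rand([4, 4, 4, 4], device='cuda')
    out = SSIMNew()(x, y)
    print(out.shape, out.min().item(), out.max().item())  # values in [0, 1]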
| JaviBite/TFG | SSIM | false | 2,414 | [
"MIT"
] | 0 | e406580697132f53b63a7c983daaa098af45b52c | https://github.com/JaviBite/TFG/tree/e406580697132f53b63a7c983daaa098af45b52c | import torch
from torch import nn
from functools import *
class Model(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super().__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Sine | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejzhnnynxtkiot2qt7feea4bkwhxo5g2qmtwe2jbyvjefkkzt6m.py
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# sin => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 30.0), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sin_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Sine(nn.Module):
"""Sine Activation Function."""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.sin(30.0 * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
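# The fixed 30.0 factor matches the w0 frequency scaling commonly used with
# sine activations in SIREN-style networks. Minimal sketch of the mapping:
if __name__ == "__main__":
    act = Sine()
    print(act(torch.zeros(2, 3)))                # sin(30 * 0) == 0 everywhere
    print(act(torch.full((1,), torch.pi / 60)))  # sin(pi / 2) == 1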
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
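# The kernel above is a 1-D elementwise map: each program instance covers
# XBLOCK consecutive elements, and xmask guards the partial block at the
# tail so out-of-range lanes neither load nor store.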
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SineNew(nn.Module):
"""Sine Activation Function."""
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| HexagonPrime/pixel-nerf | Sine | false | 2,415 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Sine Activation Function."""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.sin(30.0 * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CoordConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nz/cnzr3enannjni75kec3qorz6jm6lyd5whz6u5l3ih55bgihwnb2u.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ret => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %device_put, %device_put_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 6
x3 = (xindex // 96)
x4 = xindex % 16
x1 = (xindex // 4) % 4
x0 = xindex % 4
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x1
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 6, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = x0
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x5), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# ret_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 6, 4, 4), (96, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 6, 4, 4), (96, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
def mk_conv2d(*args, sn=False, **kwargs):
m = nn.Conv2d(*args, **kwargs)
if sn:
m = spectral_norm(m)
return m
class AddCoords(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
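# Note: each coordinate channel is arange(dim) / (dim - 1), mapped from
# [0, 1] to [-1, 1] via `* 2 - 1`; the Triton cat kernel above fuses the
# same normalization arithmetic directly into the concatenation.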
class CoordConv(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, in_channels, out_channels, with_r=False, sn=False,
**kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels + 2
if with_r:
in_size += 1
self.conv = mk_conv2d(in_size, out_channels, sn=sn, **kwargs)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
return ret
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
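# Minimal usage sketch, using the shapes from get_init_inputs()/get_inputs():
# AddCoords appends two coordinate channels, so this layer convolves over
# in_channels + 2 = 6 channels internally.
if __name__ == "__main__":
    layer = CoordConv(4, 4, kernel_size=4)
    out = layer(torch.rand([4, 4, 4, 4]))
    print(out.shape)  # torch.Size([4, 4, 1, 1]): 4x4 kernel, no padding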
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.utils import spectral_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x1
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp23 = x0
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x5, tmp31, xmask)
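# Note on the cat kernel above: 0.3333333333333333 is 1 / (dim - 1) for
# dim == 4; scaling the integer index by it and applying `* 2 - 1`
# reproduces AddCoords' normalization of the coordinate channels into [-1, 1].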
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 6, 4, 4), (96, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0
def mk_conv2d(*args, sn=False, **kwargs):
m = nn.Conv2d(*args, **kwargs)
if sn:
m = spectral_norm(m)
return m
class AddCoords(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
class CoordConvNew(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, in_channels, out_channels, with_r=False, sn=False,
**kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels + 2
if with_r:
in_size += 1
self.conv = mk_conv2d(in_size, out_channels, sn=sn, **kwargs)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| HexagonPrime/pixel-nerf | CoordConv | false | 2,416 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
def mk_conv2d(*args, sn=False, **kwargs):
m = nn.Conv2d(*args, **kwargs)
if sn:
m = spectral_norm(m)
return m
class AddCoords(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
class Model(nn.Module):
"""
Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
"""
def __init__(self, in_channels, out_channels, with_r=False, sn=False,
**kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels + 2
if with_r:
in_size += 1
self.conv = mk_conv2d(in_size, out_channels, sn=sn, **kwargs)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
return ret
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
HighwayLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# gate_output => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ln/clnagoyz3cauqnx3uxib3ikp6vd4zabtsvlhtc24rayt3qg4yaom.py
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, type_as, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten._to_copy, aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# carry_part => mul_1
# gate_output => div, sum_1
# sub => sub_1
# transform_output => relu
# transformation_part => mul
# type_as => full_default
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %div), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1 = async_compile.triton('triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp15 = tl.load(in_ptr2 + (x3), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, type_as, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten._to_copy, aten.sub, aten.add]
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1.run(buf4, buf2, buf0, primals_3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf4, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class HighwayLayer(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(torch.FloatTensor([1.0]).type_as(gate_output
) - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
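# A highway layer blends a transform branch with an identity "carry" branch:
#   y = H(x) * T(x) + x * (1 - T(x)),
# where H is highway_transform followed by transform_activation (ReLU) and
# T is highway_gate followed by gate_activation. Two caveats worth noting:
# F.softmax without an explicit dim falls back to dim=1 on 4-D inputs (with
# a deprecation warning in recent PyTorch), and filling the gate bias with a
# constant (-2) is a no-op under softmax, which is invariant to a uniform
# shift of its logits; the -2 convention comes from sigmoid-gated highway
# networks, where it biases the layer toward carrying the input through at
# initialization.
# Minimal sketch, using the toy shapes from get_inputs():
if __name__ == "__main__":
    layer = HighwayLayer(4)
    x = torch.rand([4, 4, 4, 4])
    print(layer(x).shape)  # same shape as the input: torch.Size([4, 4, 4, 4])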
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp15 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1[grid(256)](buf4,
buf2, buf0, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf4, primals_3, buf0, buf1
class HighwayLayerNew(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, input_0):
primals_1 = self.highway_transform.weight
primals_2 = self.highway_transform.bias
primals_4 = self.highway_gate.weight
primals_5 = self.highway_gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
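# Sanity-check sketch (added; assumes a CUDA device, since `call` launches
# Triton kernels). HighwayLayerNew should match the eager highway formula
# t * g + (1 - g) * x with t = relu(W_t x + b_t) and g = softmax(W_g x + b_g).
def _check_highway_compiled():
    m = HighwayLayerNew(input_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    t = F.relu(m.highway_transform(x))
    g = F.softmax(m.highway_gate(x), dim=1)  # implicit softmax dim for a 4D input is 1
    expected = t * g + (1.0 - g) * x
    torch.testing.assert_close(m(x), expected)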
| Jeffyrao/translate | HighwayLayer | false | 2,417 | [
"BSD-3-Clause"
] | 0 | ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | https://github.com/Jeffyrao/translate/tree/ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(torch.FloatTensor([1.0]).type_as(gate_output
) - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ResnetBlockFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/b3/cb3g6fwupaz5a5j23ckgaqji56bsmt4ixc37lwt344u76m75fqhf.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_3), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf5, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf4, primals_1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.autograd.profiler as profiler
class ResnetBlockFC(nn.Module):
"""
Fully connected ResNet Block class.
Taken from DVR code.
:param size_in (int): input dimension
:param size_out (int): output dimension
:param size_h (int): hidden dimension
"""
def __init__(self, size_in, size_out=None, size_h=None, beta=0.0):
super().__init__()
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
nn.init.constant_(self.fc_0.bias, 0.0)
nn.init.kaiming_normal_(self.fc_0.weight, a=0, mode='fan_in')
nn.init.constant_(self.fc_1.bias, 0.0)
nn.init.zeros_(self.fc_1.weight)
if beta > 0:
self.activation = nn.Softplus(beta=beta)
else:
self.activation = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
            # shortcut is created with bias=False, so only its weight needs init
            nn.init.kaiming_normal_(self.shortcut.weight, a=0, mode='fan_in')
def forward(self, x):
with profiler.record_function('resblock'):
net = self.fc_0(self.activation(x))
dx = self.fc_1(self.activation(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size_in': 4}]
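# Usage sketch (added for illustration). With size_in == size_out the shortcut
# is the identity, so the block computes x + fc_1(relu(fc_0(relu(x)))); because
# fc_1 is zero-initialized, a freshly built block is exactly the identity map.
def _resnet_block_example():
    block = ResnetBlockFC(size_in=4)
    x = torch.rand(4, 4, 4, 4)
    y = block(x)
    torch.testing.assert_close(y, x)  # zero-init fc_1 => dx == 0
    return y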
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_1, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, buf5
class ResnetBlockFCNew(nn.Module):
"""
Fully connected ResNet Block class.
Taken from DVR code.
:param size_in (int): input dimension
:param size_out (int): output dimension
:param size_h (int): hidden dimension
"""
def __init__(self, size_in, size_out=None, size_h=None, beta=0.0):
super().__init__()
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
nn.init.constant_(self.fc_0.bias, 0.0)
nn.init.kaiming_normal_(self.fc_0.weight, a=0, mode='fan_in')
nn.init.constant_(self.fc_1.bias, 0.0)
nn.init.zeros_(self.fc_1.weight)
if beta > 0:
self.activation = nn.Softplus(beta=beta)
else:
self.activation = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
            # shortcut is created with bias=False, so only its weight needs init
            nn.init.kaiming_normal_(self.shortcut.weight, a=0, mode='fan_in')
def forward(self, input_0):
primals_2 = self.fc_0.weight
primals_3 = self.fc_0.bias
primals_4 = self.fc_1.weight
primals_5 = self.fc_1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
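# Sanity-check sketch (added; requires CUDA because `call` dispatches Triton
# kernels). The compiled block should reproduce the eager residual computation.
def _check_resnet_block_compiled():
    block = ResnetBlockFCNew(size_in=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    relu = torch.nn.functional.relu
    expected = x + block.fc_1(relu(block.fc_0(relu(x))))
    torch.testing.assert_close(block(x), expected)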
| HexagonPrime/pixel-nerf | ResnetBlockFC | false | 2,418 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
import torch.autograd.profiler as profiler
class Model(nn.Module):
"""
Fully connected ResNet Block class.
Taken from DVR code.
:param size_in (int): input dimension
:param size_out (int): output dimension
:param size_h (int): hidden dimension
"""
def __init__(self, size_in, size_out=None, size_h=None, beta=0.0):
super().__init__()
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
nn.init.constant_(self.fc_0.bias, 0.0)
nn.init.kaiming_normal_(self.fc_0.weight, a=0, mode='fan_in')
nn.init.constant_(self.fc_1.bias, 0.0)
nn.init.zeros_(self.fc_1.weight)
if beta > 0:
self.activation = nn.Softplus(beta=beta)
else:
self.activation = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
            # shortcut is created with bias=False, so only its weight needs init
            nn.init.kaiming_normal_(self.shortcut.weight, a=0, mode='fan_in')
def forward(self, x):
with profiler.record_function('resblock'):
net = self.fc_0(self.activation(x))
dx = self.fc_1(self.activation(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
FiLMLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/z7/cz7adg57h546vw7xhiwlelni35acuxhugoxe4gwninslery5gnv4.py
# Topologically Sorted Source Nodes: [mul, add, sin], Original ATen: [aten.mul, aten.add, aten.sin]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sin => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_1), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%add,), kwargs = {})
triton_poi_fused_add_mul_sin_0 = async_compile.triton('triton_poi_fused_add_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, sin], Original ATen: [aten.mul, aten.add, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sin_0.run(primals_4, buf0, primals_5, buf1, 64, grid=grid(64), stream=stream0)
return (buf1, primals_4, primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FiLMLayer(nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim)
def forward(self, x, freq, phase_shift):
x = self.layer(x)
freq = freq.unsqueeze(1).expand_as(x)
phase_shift = phase_shift.unsqueeze(1).expand_as(x)
return torch.sin(freq * x + phase_shift)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4}]
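# Usage sketch (added for illustration). FiLM conditioning modulates the linear
# features per batch element: out = sin(freq * (W x + b) + phase_shift), with
# freq and phase_shift broadcast over the sequence dimension via unsqueeze(1).
def _film_example():
    layer = FiLMLayer(input_dim=4, hidden_dim=4)
    x = torch.rand(4, 4, 4)      # (batch, seq, input_dim)
    freq = torch.rand(4, 4)      # (batch, hidden_dim)
    phase = torch.rand(4, 4)     # (batch, hidden_dim)
    out = layer(x, freq, phase)
    assert out.shape == (4, 4, 4) and out.abs().max() <= 1.0  # sine output
    return out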
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sin_0[grid(64)](primals_4, buf0, primals_5,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf1, primals_4, primals_5, reinterpret_tensor(primals_3, (16, 4
), (4, 1), 0), buf0
class FiLMLayerNew(nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim)
def forward(self, input_0, input_1, input_2):
primals_1 = self.layer.weight
primals_2 = self.layer.bias
primals_3 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
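# Sanity-check sketch (added; CUDA-only, since `call` launches the fused
# mul+add+sin Triton kernel). The compiled layer should match the eager math.
def _check_film_compiled():
    m = FiLMLayerNew(input_dim=4, hidden_dim=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    freq = torch.rand(4, 4, device='cuda')
    phase = torch.rand(4, 4, device='cuda')
    expected = torch.sin(freq.unsqueeze(1) * m.layer(x) + phase.unsqueeze(1))
    torch.testing.assert_close(m(x, freq, phase), expected)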
| HexagonPrime/pixel-nerf | FiLMLayer | false | 2,419 | [
"BSD-2-Clause"
] | 0 | 298aa7a3451c01e6f19f73f0c756672d3de54bf9 | https://github.com/HexagonPrime/pixel-nerf/tree/298aa7a3451c01e6f19f73f0c756672d3de54bf9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim)
def forward(self, x, freq, phase_shift):
x = self.layer(x)
freq = freq.unsqueeze(1).expand_as(x)
phase_shift = phase_shift.unsqueeze(1).expand_as(x)
return torch.sin(freq * x + phase_shift)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Conv3d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dq/cdqln5h7mdv4sg4lweijkvma3pdseel2ohobukl6lpm6brka3uwu.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 27)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 3, 3, 3), (108, 27, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 108, grid=grid(108), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 3, 3, 3), (27, 9, 3, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv3d(nn.Module):
"""
    This class is for a convolutional layer (3D convolution).
"""
def __init__(self, nIn, nOut, kSize, stride=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
        PyTorch layout: input (N, Ci, D, H, W) -> output (N, Co, D, H, W)
        (TensorFlow would use [batch, in_depth, in_height, in_width, in_channels], i.e. N, D, H, W, C)
"""
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv3d(nIn, nOut, (kSize, kSize, kSize), stride=
stride, padding=(padding, padding, padding), bias=True)
def forward(self, input):
"""
:param input: input feature map
:return: transformed feature map
"""
output = self.conv(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4, 'kSize': 4}]
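# Usage sketch (added for illustration). With kSize=4 the integer padding
# int((kSize - 1) / 2) == 1, so an even kernel shrinks each spatial dim by one:
# (4 + 2*1 - 4) / 1 + 1 = 3. Note that nn.Conv3d treats the 4D input from
# get_inputs() as an unbatched (C, D, H, W) tensor.
def _conv3d_example():
    conv = Conv3d(nIn=4, nOut=4, kSize=4)
    x = torch.rand(4, 4, 4, 4)
    y = conv(x)
    assert y.shape == (4, 3, 3, 3)
    return y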
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 27
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 3, 3, 3), (108, 27, 9, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(108)](buf1, primals_2, 108,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 3, 3, 3), (27, 9, 3, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256,
64, 16, 4, 1), 0)
class Conv3dNew(nn.Module):
"""
    This class is for a convolutional layer (3D convolution).
"""
def __init__(self, nIn, nOut, kSize, stride=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
        PyTorch layout: input (N, Ci, D, H, W) -> output (N, Co, D, H, W)
        (TensorFlow would use [batch, in_depth, in_height, in_width, in_channels], i.e. N, D, H, W, C)
"""
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv3d(nIn, nOut, (kSize, kSize, kSize), stride=
stride, padding=(padding, padding, padding), bias=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
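# Sanity-check sketch (added; CUDA-only). `call` unsqueezes the 4D input to a
# batch of one for extern_kernels.convolution, then strips the batch dim again,
# so Conv3dNew should agree with the eager module on unbatched inputs.
def _check_conv3d_compiled():
    m = Conv3dNew(nIn=4, nOut=4, kSize=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(m(x), m.conv(x))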
| IRLSCU/siamban | Conv3d | false | 2,421 | [
"Apache-2.0"
] | 0 | abb12d028e93aaee74efc5042a5bb305c7805053 | https://github.com/IRLSCU/siamban/tree/abb12d028e93aaee74efc5042a5bb305c7805053 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    This class is for a convolutional layer (3D convolution).
"""
def __init__(self, nIn, nOut, kSize, stride=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
        PyTorch layout: input (N, Ci, D, H, W) -> output (N, Co, D, H, W)
        (TensorFlow would use [batch, in_depth, in_height, in_width, in_channels], i.e. N, D, H, W, C)
"""
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv3d(nIn, nOut, (kSize, kSize, kSize), stride=
stride, padding=(padding, padding, padding), bias=True)
def forward(self, input):
"""
:param input: input feature map
:return: transformed feature map
"""
output = self.conv(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yd/cydbtjoq352gcolmflbvu2nqkda7xg7q5hnvltb47jsg5dbmubym.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf11)
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
        (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
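def _combine_heads_shape_demo():
    # Illustrative shape check (added; not part of the original repository):
    # a (batch=2, nheads=4, seq_len=3, d_head=8) tensor collapses back to
    # (batch=2, seq_len=3, d_model=32).
    X = torch.zeros(2, 4, 3, 8)
    assert combine_heads(X).shape == (2, 3, 32)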
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths: optional tensor of per-example source lengths; if given,
        padding positions beyond each length are masked out
    Outputs:
      output: [batch size, nheads, sequence length, d_v]
      p_attn: [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
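def _scaled_dot_prod_attn_demo():
    # Illustrative usage with toy shapes (added; not part of the original
    # repository). After the softmax, every attention row sums to 1.
    q = torch.rand(2, 4, 5, 8)  # (batch, nheads, seq_len, d_k)
    k = torch.rand(2, 4, 5, 8)
    v = torch.rand(2, 4, 5, 8)
    out, p_attn = scaled_dot_prod_attn(q, k, v)
    assert out.shape == (2, 4, 5, 8)
    assert p_attn.shape == (2, 4, 5, 5)
    assert torch.allclose(p_attn.sum(dim=-1), torch.ones(2, 4, 5))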
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
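def _split_combine_roundtrip_demo():
    # Illustrative check (added; not part of the original repository):
    # combine_heads inverts split_heads on a (batch, seq_len, d_model) tensor.
    X = torch.arange(2 * 3 * 8, dtype=torch.float32).view(2, 3, 8)
    assert torch.equal(combine_heads(split_heads(X, 4)), X)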
class MultiheadAttention(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads; not an __init__ argument)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
unseen_mask: if True, only attend to previous sequence positions
        src_lengths: optional per-example source lengths used to mask padding
Output
result : [batch_size, sequence length, d_model]
"""
def __init__(self, nheads, d_model):
"""Take in model size and number of heads."""
super(MultiheadAttention, self).__init__()
assert d_model % nheads == 0
self.d_head = d_model // nheads
self.nheads = nheads
self.Q_fc = nn.Linear(d_model, d_model, bias=False)
self.K_fc = nn.Linear(d_model, d_model, bias=False)
self.V_fc = nn.Linear(d_model, d_model, bias=False)
self.output_fc = nn.Linear(d_model, d_model, bias=False)
self.attn = None
def forward(self, query, key, value, unseen_mask=False, src_lengths=None):
query = split_heads(self.Q_fc(query), self.nheads)
key = split_heads(self.K_fc(key), self.nheads)
value = split_heads(self.V_fc(value), self.nheads)
x, self.attn = scaled_dot_prod_attn(query=query, key=key, value=
value, unseen_mask=unseen_mask, src_lengths=src_lengths)
x = combine_heads(x)
return self.output_fc(x)
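def _multihead_attention_demo():
    # Minimal self-attention usage sketch (added; not part of the original
    # repository). Sizes mirror get_inputs() below.
    mha = MultiheadAttention(nheads=4, d_model=4)
    x = torch.rand(4, 4, 4)
    assert mha(x, x, x).shape == (4, 4, 4)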
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'nheads': 4, 'd_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
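# Note (added commentary; not emitted by Inductor itself): the softmax is
# split into two pointwise kernels. triton_poi_fused__softmax_1 subtracts the
# row max and exponentiates for numerical stability;
# triton_poi_fused__softmax_2 then divides by the row sum.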
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_0[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf11)
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
        (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths: optional tensor of per-example source lengths; if given,
        padding positions beyond each length are masked out
    Outputs:
      output: [batch size, nheads, sequence length, d_v]
      p_attn: [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
class MultiheadAttentionNew(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads; not an __init__ argument)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
unseen_mask: if True, only attend to previous sequence positions
        src_lengths: optional per-example source lengths used to mask padding
Output
result : [batch_size, sequence length, d_model]
"""
def __init__(self, nheads, d_model):
"""Take in model size and number of heads."""
super(MultiheadAttentionNew, self).__init__()
assert d_model % nheads == 0
self.d_head = d_model // nheads
self.nheads = nheads
self.Q_fc = nn.Linear(d_model, d_model, bias=False)
self.K_fc = nn.Linear(d_model, d_model, bias=False)
self.V_fc = nn.Linear(d_model, d_model, bias=False)
self.output_fc = nn.Linear(d_model, d_model, bias=False)
self.attn = None
def forward(self, input_0, input_1, input_2):
primals_1 = self.Q_fc.weight
primals_3 = self.K_fc.weight
primals_5 = self.V_fc.weight
primals_7 = self.output_fc.weight
primals_2 = input_0
primals_4 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Jeffyrao/translate | MultiheadAttention | false | 2,422 | [
"BSD-3-Clause"
] | 0 | ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | https://github.com/Jeffyrao/translate/tree/ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | import math
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
        (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths: optional tensor of per-example source lengths; if given,
        padding positions beyond each length are masked out
    Outputs:
      output: [batch size, nheads, sequence length, d_v]
      p_attn: [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
class Model(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads; not an __init__ argument)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
# ... truncated (>4000 chars) for memory efficiency |
tfAvgPool3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j4/cj47gjxddooj3qeijcb25ywjhwfl5gx75ggkyzuyl3muz5nxshah.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d]
# Source node to ATen node mapping:
# x => constant_pad_nd
# x_1 => avg_pool3d
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [0, 1, 0, 1], 0.0), kwargs = {})
# %avg_pool3d : [num_users=3] = call_function[target=torch.ops.aten.avg_pool3d.default](args = (%constant_pad_nd, [1, 3, 3], [1, 2, 2]), kwargs = {})
triton_poi_fused_avg_pool3d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool3d_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool3d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool3d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x3 = (xindex // 2)
x4 = xindex
tmp0 = 2*x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 2*x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((2*x0) + (8*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = 1 + (2*x0)
tmp8 = tmp7 < tmp1
tmp9 = tmp2 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x3)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = 2 + (2*x0)
tmp13 = tmp12 < tmp1
tmp14 = tmp2 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + (2*x0) + (8*x3)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp11
tmp17 = 1 + (2*x1)
tmp18 = tmp17 < tmp1
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x3)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp20 + tmp16
tmp22 = tmp18 & tmp8
tmp23 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp23 + tmp21
tmp25 = tmp18 & tmp13
tmp26 = tl.load(in_ptr0 + (6 + (2*x0) + (8*x3)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp24
tmp28 = 2 + (2*x1)
tmp29 = tmp28 < tmp1
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr0 + (8 + (2*x0) + (8*x3)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp27
tmp33 = tmp29 & tmp8
tmp34 = tl.load(in_ptr0 + (9 + (2*x0) + (8*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp13
tmp37 = tl.load(in_ptr0 + (10 + (2*x0) + (8*x3)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 0.1111111111111111
tmp40 = tmp38 * tmp39
tl.store(out_ptr0 + (x4), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wu/cwumdhgny2yfc2fqegw3q6scmzh4l35mrpxzpy7u3hofzgr4dlgf.py
# Topologically Sorted Source Nodes: [mul, truediv, setitem, mul_1, truediv_1, setitem_1], Original ATen: [aten.mul, aten.div, aten.copy]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# setitem => copy
# setitem_1 => copy_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 9), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_1, %div), kwargs = {})
# %select_scatter_default : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%avg_pool3d, %copy, 3, -1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, 9), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 6), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_6, %div_1), kwargs = {})
# %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy_1, 2, -1), kwargs = {})
triton_poi_fused_copy_div_mul_1 = async_compile.triton('triton_poi_fused_copy_div_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_div_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_div_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x2 = (xindex // 4)
x4 = (xindex // 2)
x5 = xindex
tmp5 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (2*x4)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (x5), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tmp3 == tmp1
tmp6 = 9.0
tmp7 = tmp5 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp11 * tmp6
tmp13 = tmp12 * tmp8
tmp15 = tmp14 * tmp6
tmp16 = tmp15 * tmp8
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tl.where(tmp2, tmp13, tmp18)
tl.store(out_ptr0 + (x5), tmp19, xmask)
''', device_str='cuda')
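# Note (added commentary; not emitted by Inductor itself): in the first kernel
# 0.1111111111111111 is 1/9, the 3x3 pooling window. In the second kernel the
# pair `* 9.0` then `* 0.16666666666666666` rescales the padded edge cells by
# 9/6, compensating for the three zero taps that right/bottom padding adds to
# each edge window.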
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool3d_constant_pad_nd_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, setitem, mul_1, truediv_1, setitem_1], Original ATen: [aten.mul, aten.div, aten.copy]
triton_poi_fused_copy_div_mul_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class tfAvgPool3D(nn.Module):
def __init__(self) ->None:
super().__init__()
self.avgf = nn.AvgPool3d((1, 3, 3), stride=(1, 2, 2))
def forward(self, x: 'Tensor') ->Tensor:
if x.shape[-1] != x.shape[-2]:
raise RuntimeError('only same shape for h and w ' +
'are supported by avg with tf_like')
f1 = x.shape[-1] % 2 != 0
if f1:
padding_pad = 0, 0, 0, 0
else:
padding_pad = 0, 1, 0, 1
x = torch.nn.functional.pad(x, padding_pad)
if f1:
x = torch.nn.functional.avg_pool3d(x, (1, 3, 3), stride=(1, 2,
2), count_include_pad=False, padding=(0, 1, 1))
else:
x = self.avgf(x)
x[..., -1] = x[..., -1] * 9 / 6
x[..., -1, :] = x[..., -1, :] * 9 / 6
return x
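def _tf_avg_pool_demo():
    # Illustrative usage (added; not part of the original repository): for an
    # even 4x4 spatial input, the module pads right/bottom by one, pools with
    # a 3x3 window at stride 2, then rescales the padded edge by 9/6 so the
    # result matches TensorFlow-style 'same' average pooling.
    y = tfAvgPool3D()(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 2, 2)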
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool3d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = 2 * x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 2 * x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = 1 + 2 * x0
tmp8 = tmp7 < tmp1
tmp9 = tmp2 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = 2 + 2 * x0
tmp13 = tmp12 < tmp1
tmp14 = tmp2 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + 2 * x0 + 8 * x3), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp11
tmp17 = 1 + 2 * x1
tmp18 = tmp17 < tmp1
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp20 + tmp16
tmp22 = tmp18 & tmp8
tmp23 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp23 + tmp21
tmp25 = tmp18 & tmp13
tmp26 = tl.load(in_ptr0 + (6 + 2 * x0 + 8 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp24
tmp28 = 2 + 2 * x1
tmp29 = tmp28 < tmp1
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr0 + (8 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp27
tmp33 = tmp29 & tmp8
tmp34 = tl.load(in_ptr0 + (9 + 2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp13
tmp37 = tl.load(in_ptr0 + (10 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 0.1111111111111111
tmp40 = tmp38 * tmp39
tl.store(out_ptr0 + x4, tmp40, xmask)
@triton.jit
def triton_poi_fused_copy_div_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x2 = xindex // 4
x4 = xindex // 2
x5 = xindex
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 2 * x4), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + x5, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tmp3 == tmp1
tmp6 = 9.0
tmp7 = tmp5 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp11 * tmp6
tmp13 = tmp12 * tmp8
tmp15 = tmp14 * tmp6
tmp16 = tmp15 * tmp8
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tl.where(tmp2, tmp13, tmp18)
tl.store(out_ptr0 + x5, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool3d_constant_pad_nd_0[grid(64)](arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_copy_div_mul_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf1,
class tfAvgPool3DNew(nn.Module):
def __init__(self) ->None:
super().__init__()
self.avgf = nn.AvgPool3d((1, 3, 3), stride=(1, 2, 2))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Jo951128/2021-2-MIP | tfAvgPool3D | false | 2,423 | [
"MIT"
] | 0 | 511e0a38816d16fdba9631f76cf913ba51c43138 | https://github.com/Jo951128/2021-2-MIP/tree/511e0a38816d16fdba9631f76cf913ba51c43138 | import torch
from torch import Tensor
from torch import nn
class Model(nn.Module):
def __init__(self) ->None:
super().__init__()
self.avgf = nn.AvgPool3d((1, 3, 3), stride=(1, 2, 2))
def forward(self, x: 'Tensor') ->Tensor:
if x.shape[-1] != x.shape[-2]:
raise RuntimeError('only same shape for h and w ' +
'are supported by avg with tf_like')
f1 = x.shape[-1] % 2 != 0
if f1:
padding_pad = 0, 0, 0, 0
else:
padding_pad = 0, 1, 0, 1
x = torch.nn.functional.pad(x, padding_pad)
if f1:
x = torch.nn.functional.avg_pool3d(x, (1, 3, 3), stride=(1, 2,
2), count_include_pad=False, padding=(0, 1, 1))
else:
x = self.avgf(x)
x[..., -1] = x[..., -1] * 9 / 6
x[..., -1, :] = x[..., -1, :] * 9 / 6
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SineLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejzhnnynxtkiot2qt7feea4bkwhxo5g2qmtwe2jbyvjefkkzt6m.py
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# sin => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 30), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
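# Note (added commentary; not emitted by Inductor itself): omega_0 = 30 from
# the module below is baked into this kernel as the constant 30.0, so the
# fused computation is sin(30 * (x @ W^T + b)), with the affine part handled
# by the addmm call in `call`.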
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sin_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class SineLayer(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', omega_0:
'float'=30, is_first: 'bool'=False) ->None:
"""Sine activation function layer with omega_0 scaling.
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
omega_0 (float, optional): Scaling factor of the Sine function. Defaults to 30.
            is_first (bool, optional): Whether this is the network's first layer; this switches the weight-initialization range used in init_weights. Defaults to False.
"""
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features)
self.init_weights()
def init_weights(self) ->None:
"""Initialization of the weigths."""
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
"""Forward pass through the layer.
Args:
input (torch.Tensor): Input tensor of shape (n_samples, n_inputs).
Returns:
torch.Tensor: Prediction of shape (n_samples, n_outputs)
"""
return torch.sin(self.omega_0 * self.linear(input))
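def _sine_layer_demo():
    # Illustrative usage (added; not part of the original repository): the
    # layer computes sin(omega_0 * (W x + b)), so with any omega_0 its output
    # is bounded in [-1, 1].
    layer = SineLayer(in_features=4, out_features=4, is_first=True)
    y = layer(torch.rand(8, 4))
    assert y.shape == (8, 4)
    assert float(y.abs().max()) <= 1.0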
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class SineLayerNew(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', omega_0:
'float'=30, is_first: 'bool'=False) ->None:
"""Sine activation function layer with omega_0 scaling.
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
omega_0 (float, optional): Scaling factor of the Sine function. Defaults to 30.
            is_first (bool, optional): Whether this is the network's first layer; this switches the weight-initialization range used in init_weights. Defaults to False.
"""
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features)
self.init_weights()
def init_weights(self) ->None:
"""Initialization of the weigths."""
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Jose-Bastos/DeePyMoD | SineLayer | false | 2,424 | [
"MIT"
] | 0 | c043f9314990c9dd67d8f897cb14e107758f326d | https://github.com/Jose-Bastos/DeePyMoD/tree/c043f9314990c9dd67d8f897cb14e107758f326d | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', omega_0:
'float'=30, is_first: 'bool'=False) ->None:
"""Sine activation function layer with omega_0 scaling.
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
omega_0 (float, optional): Scaling factor of the Sine function. Defaults to 30.
            is_first (bool, optional): Whether this is the network's first layer; this switches the weight-initialization range used in init_weights. Defaults to False.
"""
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features)
self.init_weights()
def init_weights(self) ->None:
"""Initialization of the weigths."""
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
"""Forward pass through the layer.
Args:
input (torch.Tensor): Input tensor of shape (n_samples, n_inputs).
Returns:
torch.Tensor: Prediction of shape (n_samples, n_outputs)
"""
return torch.sin(self.omega_0 * self.linear(input))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConvSqu | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nt/cnt242zndfm3n4oylofk2hfby6qwsvn3dgogtgqskjur3zjntaph.py
# Topologically Sorted Source Nodes: [softplus, tanh, mul], Original ATen: [aten.softplus, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# softplus => exp, gt, log1p, where
# tanh => tanh
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%convolution,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %log1p), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %tanh), kwargs = {})
triton_poi_fused_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_mul_softplus_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_softplus_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, tanh, mul], Original ATen: [aten.softplus, aten.tanh, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, primals_1, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
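# Illustrative examples of autopad's 'same'-padding rule (added; not part of
# the original repository): pad = k // 2, applied elementwise to tuple kernels.
assert autopad(1) == 0 and autopad(3) == 1
assert autopad((3, 5)) == [1, 2]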
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
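def _mish_demo():
    # Numeric sketch (added; not part of the original repository): Mish is
    # x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)); it returns 0 at x = 0
    # and approaches the identity for large positive x.
    x = torch.tensor([0.0, 5.0])
    y = Mish.forward(x)
    assert torch.allclose(y, x * torch.tanh(F.softplus(x)))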
class ConvSqu(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super(ConvSqu, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = Mish() if act else nn.Identity()
def forward(self, x):
return self.act(self.conv(x))
def fuseforward(self, x):
return self.act(self.conv(x))
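def _conv_squ_demo():
    # Illustrative usage (added; not part of the original repository): a 1x1,
    # stride-1 ConvSqu preserves the spatial size and applies Mish after the
    # bias-free convolution.
    m = ConvSqu(c1=4, c2=4)
    assert m(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)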
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf0
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
class ConvSquNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super(ConvSquNew, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = Mish() if act else nn.Identity()
def fuseforward(self, x):
return self.act(self.conv(x))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| JuliannaChaykina/social-distance | ConvSqu | false | 2,425 | [
"Apache-2.0"
] | 0 | 1c8ade043254b78de49a1244d438203ddb38c586 | https://github.com/JuliannaChaykina/social-distance/tree/1c8ade043254b78de49a1244d438203ddb38c586 | import torch
import torch.nn as nn
import torch.nn.functional as F
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
class Model(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = Mish() if act else nn.Identity()
def forward(self, x):
return self.act(self.conv(x))
def fuseforward(self, x):
return self.act(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConvSig | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6q/c6qtcqlhzp2spzvdv6knpwm32alhfras7qga7rybepicf6poy4sy.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, 256, grid=grid(256), stream=stream0)
return (buf1, primals_1, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class ConvSig(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super(ConvSig, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = nn.Sigmoid() if act else nn.Identity()
def forward(self, x):
return self.act(self.conv(x))
def fuseforward(self, x):
return self.act(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
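# In-place sigmoid kernel: reads and writes the same buffer (in_out_ptr0),
# so the convolution output is overwritten with sigmoid(x) without an
# extra allocation.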
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf1
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class ConvSigNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super(ConvSigNew, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = nn.Sigmoid() if act else nn.Identity()
def fuseforward(self, x):
return self.act(self.conv(x))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
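# Minimal usage sketch (assumes a CUDA device; shapes are illustrative):
# m = ConvSigNew(c1=4, c2=4).cuda()
# y = m(torch.rand(4, 4, 4, 4, device='cuda')) # matches torch.sigmoid(m.conv(x))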
| JuliannaChaykina/social-distance | ConvSig | false | 2,426 | ["Apache-2.0"] | 0 | 1c8ade043254b78de49a1244d438203ddb38c586 | https://github.com/JuliannaChaykina/social-distance/tree/1c8ade043254b78de49a1244d438203ddb38c586 | import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Model(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = nn.Sigmoid() if act else nn.Identity()
def forward(self, x):
return self.act(self.conv(x))
def fuseforward(self, x):
return self.act(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MP | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/tw/ctwvjh3sk72wvpvfe6qttbbnuv7go7omfvhyfpoli3k62h5jasad.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MP(nn.Module):
def __init__(self, k=2):
super(MP, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=k)
def forward(self, x):
return self.m(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
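# 2x2 max-pool kernel for a 4x4 spatial input: each output element is the
# maximum over a 2x2 window. With an input row stride of 4, the four loads
# at offsets 0, 1, 4 and 5 cover the window's two rows.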
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MPNew(nn.Module):
def __init__(self, k=2):
super(MPNew, self).__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=k)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
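# Minimal usage sketch (assumes a CUDA device; shapes are illustrative):
# m = MPNew(k=2)
# y = m(torch.rand(4, 4, 4, 4, device='cuda')) # y.shape == (4, 4, 2, 2)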
| JuliannaChaykina/social-distance | MP | false | 2,427 | ["Apache-2.0"] | 0 | 1c8ade043254b78de49a1244d438203ddb38c586 | https://github.com/JuliannaChaykina/social-distance/tree/1c8ade043254b78de49a1244d438203ddb38c586 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, k=2):
super().__init__()
self.m = nn.MaxPool2d(kernel_size=k, stride=k)
def forward(self, x):
return self.m(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InternalQNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sm/csm4ofalq42npqq7fv6jo3il6ujywmjwqnazwa5z35h4asxel7vx.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %slice_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 68
x1 = (xindex // 68)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 68, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/i5/ci5f4nyelvfg4yf2o65ompoikj7ejkd32vb6hqtyrgycc5eswrpx.py
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7x/c7x5xwxa3trnfvh3a36jcvmeiys35v7jdmhxe452kjq44lx6572n.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = (xindex // 192)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 192, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((128*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yi/cyixav4lzx3eaexai4duichc4uufdykdv7zas2wsy3lr4ygpciwl.py
# Topologically Sorted Source Nodes: [recurrent_activation], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# recurrent_activation => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ky/ckyootcjihnfezu7ihooc3rl37mg6xu7uyemhngi3osilyhnztyl.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (128, 68), (68, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (4, 192), (192, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 128), (128, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_3, primals_1, buf1, 272, grid=grid(272), stream=stream0)
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (68, 128), (1, 68), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 512, grid=grid(512), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 192), (192, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf0, primals_3, buf3, buf4, 768, grid=grid(768), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (192, 4), (1, 192), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf8 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf6 = reinterpret_tensor(buf8, (4, 4), (8, 1), 0) # alias
# Topologically Sorted Source Nodes: [action_activation], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = reinterpret_tensor(buf8, (4, 4), (8, 1), 4) # alias
# Topologically Sorted Source Nodes: [recurrent_activation], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf5, buf7, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf0, primals_3, buf9, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
return (buf8, primals_1, buf1, buf3, buf4, buf5, primals_8, primals_6, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 68), (68, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 192), (192, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class InternalQNetwork(nn.Module):
def __init__(self, state_size, action_size, recurrent_size, seed,
fc1_units=64, fc2_units=128):
super(InternalQNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + recurrent_size, fc2_units)
self.fc3 = nn.Linear(fc1_units + fc2_units, recurrent_size)
self.fc4 = nn.Linear(fc2_units, action_size)
def forward(self, x):
obs = x[:, :8]
prev_recurrent = x[:, -5:]
x1 = F.relu(self.fc1(obs))
x2 = F.relu(self.fc2(torch.cat([x1, prev_recurrent], dim=1)))
recurrent_activation = torch.sigmoid(self.fc3(torch.cat([x1, x2],
dim=1)))
action_activation = self.fc4(x2)
return torch.cat([action_activation, recurrent_activation], dim=1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'recurrent_size': 4,
'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
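# Kernel overview: the two cat kernels fuse ReLU(fc1(x)) with the tensor
# concatenations, the sigmoid kernel writes its result directly into the
# second half of the final (4, 8) output buffer (note the stride-8 store),
# and the threshold_backward kernel records the ReLU mask needed for autograd.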
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 68
x1 = xindex // 68
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 68, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-64 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = xindex // 192
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 192, tl.int64)
tmp15 = tl.load(in_ptr2 + (128 * x1 + (-64 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp1, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (128, 68), (68, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 192), (192, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 128), (128, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 64),
(1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(272)](buf0, primals_3, primals_1, buf1,
272, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (68, 128), (1,
68), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 192), (192, 1), torch.float32)
triton_poi_fused_cat_2[grid(768)](buf0, primals_3, buf3, buf4, 768,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(192, 4), (1, 192), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf8 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf6 = reinterpret_tensor(buf8, (4, 4), (8, 1), 0)
extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8,
(128, 4), (1, 128), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = reinterpret_tensor(buf8, (4, 4), (8, 1), 4)
triton_poi_fused_sigmoid_3[grid(16)](buf5, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(256)](buf0,
primals_3, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
return (buf8, primals_1, buf1, buf3, buf4, buf5, primals_8, primals_6,
primals_4, buf9)
class InternalQNetworkNew(nn.Module):
def __init__(self, state_size, action_size, recurrent_size, seed,
fc1_units=64, fc2_units=128):
super(InternalQNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + recurrent_size, fc2_units)
self.fc3 = nn.Linear(fc1_units + fc2_units, recurrent_size)
self.fc4 = nn.Linear(fc2_units, action_size)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
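# Minimal usage sketch (assumes a CUDA device; shapes are illustrative):
# net = InternalQNetworkNew(state_size=4, action_size=4, recurrent_size=4, seed=4).cuda()
# out = net(torch.rand(4, 4, device='cuda')) # shape (4, 8): action logits then recurrent state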
| Josh-Joseph/tsc-2019 | InternalQNetwork | false | 2,428 | ["MIT"] | 0 | 0cb68b69448257ec7fd8d9edaf6b8aa165599554 | https://github.com/Josh-Joseph/tsc-2019/tree/0cb68b69448257ec7fd8d9edaf6b8aa165599554 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, state_size, action_size, recurrent_size, seed,
fc1_units=64, fc2_units=128):
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + recurrent_size, fc2_units)
self.fc3 = nn.Linear(fc1_units + fc2_units, recurrent_size)
self.fc4 = nn.Linear(fc2_units, action_size)
def forward(self, x):
obs = x[:, :8]
prev_recurrent = x[:, -5:]
x1 = F.relu(self.fc1(obs))
x2 = F.relu(self.fc2(torch.cat([x1, prev_recurrent], dim=1)))
recurrent_activation = torch.sigmoid(self.fc3(torch.cat([x1, x2],
dim=1)))
action_activation = self.fc4(x2)
return torch.cat([action_activation, recurrent_activation], dim=1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'recurrent_size': 4,
'seed': 4}]
|
Hardsigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/up/cupcnt2ednegkxpkhimpev2wbxmbkkih7j53vbxggg2ozvitm6ob.py
# Topologically Sorted Source Nodes: [mul, add, x], Original ATen: [aten.mul, aten.add, aten.clamp]
# Source node to ATen node mapping:
# add => add
# mul => mul
# x => clamp_max, clamp_min
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
triton_poi_fused_add_clamp_mul_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, x], Original ATen: [aten.mul, aten.add, aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class Hardsigmoid(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, x: 'Tensor') ->Tensor:
x = (0.2 * x + 0.5).clamp(min=0.0, max=1.0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
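# Hard-sigmoid kernel: out = clamp(0.2 * x + 0.5, 0.0, 1.0). Note the 0.2
# slope here, as opposed to the 1/6 slope used by torch.nn.Hardsigmoid.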
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HardsigmoidNew(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
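# Minimal usage sketch (assumes a CUDA device; shapes are illustrative):
# m = HardsigmoidNew()
# y = m(torch.rand(4, 4, 4, 4, device='cuda')) # equals (0.2 * x + 0.5).clamp(0, 1)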
| Jo951128/2021-2-MIP | Hardsigmoid | false | 2,429 | ["MIT"] | 0 | 511e0a38816d16fdba9631f76cf913ba51c43138 | https://github.com/Jo951128/2021-2-MIP/tree/511e0a38816d16fdba9631f76cf913ba51c43138 | import torch
from torch import Tensor
from torch import nn
class Model(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, x: 'Tensor') ->Tensor:
x = (0.2 * x + 0.5).clamp(min=0.0, max=1.0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FMNISTModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
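# FMNISTModel lowering overview (from the graph fragments below): three fused
# conv+ReLU stages, followed by a reduction kernel that computes the global
# average pool and also emits the ReLU mask needed for the backward pass.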
# kernel path: runs/run_shard_7/inductor_cache/oh/cohhzozgklcdr3g2cpdmnac2zvbvmk53smneafef4zekz5p2kieu.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qn/cqnqaq7kh6sypugb6bqfg74kezlshfvip2ipwvaogffif2deremo.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vp/cvpcebh3g2ngokhs3y5ftkrc3csu3fyxpu3jlt3rwpmhnrjoopvv.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# x_3 => mean
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_2, [-1, -2], True), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_red_fused_convolution_mean_relu_threshold_backward_2 = async_compile.triton('triton_red_fused_convolution_mean_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_convolution_mean_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_convolution_mean_relu_threshold_backward_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask & xmask, tmp7, _tmp6)
tmp8 = 0.0
tmp9 = tmp4 <= tmp8
tl.store(out_ptr0 + (r2 + (4096*x3)), tmp9, rmask & xmask)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tmp10 = 4096.0
tmp11 = tmp6 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
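# Note: this reduction kernel fuses several ops per (batch, channel) pair
# (xnumel = 4 * 32 = 128): it adds the conv bias, applies ReLU, accumulates the
# sum over the 64*64 = 4096 spatial positions for the adaptive average pool,
# and also stores the boolean mask (relu_out <= 0) that the saved-for-backward
# threshold_backward will consume.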
# kernel path: runs/run_shard_7/inductor_cache/j2/cj2fvpown4cia7d7vkfcgcqgyjjenqjrj3dsf57yvha4phl4yqmw.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
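# Note: a numerically stable log-softmax: the row max is subtracted before
# exponentiation, then log(sum(exp(...))) is subtracted back (the
# log-sum-exp trick).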
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (10, 32), (32, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 131072, grid=grid(131072), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean, aten.threshold_backward]
triton_red_fused_convolution_mean_relu_threshold_backward_2.run(buf6, buf4, primals_7, buf11, 128, 4096, grid=grid(128), stream=stream0)
del buf4
del primals_7
buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (4, 32), (32, 1), 0), reinterpret_tensor(primals_8, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf7, buf10, 4, 10, grid=grid(4), stream=stream0)
del buf7
return (buf10, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf6, (4, 32), (32, 1), 0), buf10, primals_8, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class FMNISTModel(nn.Module):
def __init__(self):
super(FMNISTModel, self).__init__()
self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.pool = nn.AdaptiveAvgPool2d(1)
self.out = nn.Linear(32, 10)
self.criterion = nn.NLLLoss()
self.optimizer = torch.optim.Adam(self.parameters(), 0.003)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pool(x)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=1)
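# Note: returning log-probabilities pairs with the NLLLoss criterion defined
# above; together they are equivalent to CrossEntropyLoss on raw logits.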
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_red_fused_convolution_mean_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask & xmask, tmp7, _tmp6)
tmp8 = 0.0
tmp9 = tmp4 <= tmp8
tl.store(out_ptr0 + (r2 + 4096 * x3), tmp9, rmask & xmask)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tmp10 = 4096.0
tmp11 = tmp6 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp11, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (10, 32), (32, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(131072)](buf1, primals_2,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(262144)](buf3, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf6 = buf5
del buf5
triton_red_fused_convolution_mean_relu_threshold_backward_2[grid(128)](
buf6, buf4, primals_7, buf11, 128, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del buf4
del primals_7
buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (4, 32), (
32, 1), 0), reinterpret_tensor(primals_8, (32, 10), (1, 32), 0),
alpha=1, beta=1, out=buf7)
del primals_9
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf7, buf10, 4, 10, XBLOCK
=1, num_warps=2, num_stages=1)
del buf7
return (buf10, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
reinterpret_tensor(buf6, (4, 32), (32, 1), 0), buf10, primals_8, buf11)
class FMNISTModelNew(nn.Module):
def __init__(self):
super(FMNISTModelNew, self).__init__()
self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.pool = nn.AdaptiveAvgPool2d(1)
self.out = nn.Linear(32, 10)
self.criterion = nn.NLLLoss()
self.optimizer = torch.optim.Adam(self.parameters(), 0.003)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.out.weight
primals_9 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| BrandonLMorris/image-classification | FMNISTModel | false | 2,430 | [
"Apache-2.0"
] | 0 | 6461d735fbf73bfd181b5b16f703a2a8ea53833b | https://github.com/BrandonLMorris/image-classification/tree/6461d735fbf73bfd181b5b16f703a2a8ea53833b | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.pool = nn.AdaptiveAvgPool2d(1)
self.out = nn.Linear(32, 10)
self.criterion = nn.NLLLoss()
self.optimizer = torch.optim.Adam(self.parameters(), 0.003)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pool(x)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
CDilated | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6r/c6rkywilfuy64m6cuftgkvjhytprq2kemqwqphmypsicig6wdmin.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 144, grid=grid(144), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CDilated(nn.Module):
"""
This class defines the dilated convolution.
    Dilated (atrous) convolution.
"""
def __init__(self, nIn, nOut, kSize, stride=1, d=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
:param d: optional dilation rate
"""
super().__init__()
padding = int((kSize - 1) / 2) * d
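        # e.g. kSize=3, d=2 -> padding = 1 * 2 = 2: a dilated 3x3 kernel spans
        # d*(kSize-1) + 1 = 5 pixels, so this keeps the spatial size at
        # stride=1 ('same' padding only works out exactly for odd kernel sizes).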
self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
padding=(padding, padding), bias=True, dilation=d)
def forward(self, input):
"""
:param input: input feature map
:return: transformed feature map
"""
output = self.conv(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4, 'kSize': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class CDilatedNew(nn.Module):
"""
This class defines the dilated convolution.
    Dilated (atrous) convolution.
"""
def __init__(self, nIn, nOut, kSize, stride=1, d=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
:param d: optional dilation rate
"""
super().__init__()
padding = int((kSize - 1) / 2) * d
self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
padding=(padding, padding), bias=True, dilation=d)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| IRLSCU/siamban | CDilated | false | 2,431 | [
"Apache-2.0"
] | 0 | abb12d028e93aaee74efc5042a5bb305c7805053 | https://github.com/IRLSCU/siamban/tree/abb12d028e93aaee74efc5042a5bb305c7805053 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
This class defines the dilated convolution.
    Dilated (atrous) convolution.
"""
def __init__(self, nIn, nOut, kSize, stride=1, d=1):
"""
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
:param d: optional dilation rate
"""
super().__init__()
padding = int((kSize - 1) / 2) * d
self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
padding=(padding, padding), bias=True, dilation=d)
def forward(self, input):
"""
:param input: input feature map
:return: transformed feature map
"""
output = self.conv(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
DotAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
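# Note: this "clone" kernel materializes the transposed matmul operand into
# contiguous memory; the load index swaps dims 1 and 2 (16*x1 vs 4*x2) so the
# subsequent extern bmm reads both operands with unit-stride inner dimensions.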
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del arg1_1
del buf0
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.optim
class AttentionMechanism(nn.Module):
def __init__(self):
super(AttentionMechanism, self).__init__()
def forward(self, *input):
raise NotImplementedError('Implement this.')
class DotAttention(AttentionMechanism):
def __init__(self):
super(DotAttention, self).__init__()
def forward(self, q, k):
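        # Unscaled dot-product scores: for 3-D inputs q (B, L_q, D) and
        # k (B, L_k, D), this yields (B, L_q, L_k). No 1/sqrt(D) scaling
        # (as in Transformer attention) is applied here.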
return q @ k.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del arg1_1
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class AttentionMechanism(nn.Module):
def __init__(self):
super(AttentionMechanism, self).__init__()
def forward(self, *input):
raise NotImplementedError('Implement this.')
class DotAttentionNew(AttentionMechanism):
def __init__(self):
super(DotAttentionNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| JoshuaGhost/e2expred | DotAttention | false | 2,432 | [
"MIT"
] | 0 | f4dee47c41748a64509b68daee83d97919b6c978 | https://github.com/JoshuaGhost/e2expred/tree/f4dee47c41748a64509b68daee83d97919b6c978 | import torch
from torch import nn
import torch.optim
class AttentionMechanism(nn.Module):
def __init__(self):
super().__init__()
def forward(self, *input):
raise NotImplementedError('Implement this.')
class Model(AttentionMechanism):
def __init__(self):
super().__init__()
def forward(self, q, k):
return q @ k.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Classify | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool2d => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
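# Note: this kernel implements AdaptiveAvgPool2d(1) as a plain mean: each of
# the xnumel = 4 * 4 = 16 (batch, channel) pairs reduces its 4*4 = 16 spatial
# values and divides by 16.0.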
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
return (reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
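# e.g. autopad(3) -> 1 and autopad((1, 3)) -> [0, 1]: 'same'-style padding for
# stride-1 convolutions with odd kernel sizes.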
class Flatten(nn.Module):
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class Classify(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.flat = Flatten()
def forward(self, x):
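        # Pool each input map (x may be a list of feature maps) to 1x1,
        # concatenate along channels, then apply the 1x1 conv and flatten
        # to (batch, c2) class scores.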
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else
[x])], 1)
return self.flat(self.conv(z))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_2, buf1
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Flatten(nn.Module):
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class ClassifyNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(ClassifyNew, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.flat = Flatten()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| JuliannaChaykina/social-distance | Classify | false | 2,433 | [
"Apache-2.0"
] | 0 | 1c8ade043254b78de49a1244d438203ddb38c586 | https://github.com/JuliannaChaykina/social-distance/tree/1c8ade043254b78de49a1244d438203ddb38c586 | import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Flatten(nn.Module):
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class Model(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super().__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.flat = Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else
[x])], 1)
return self.flat(self.conv(z))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Highway | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mx/cmxbsuduhrb3re3kyyjzil6fiub7wk6y3jw7tjv4rnhxateuwfne.py
# Topologically Sorted Source Nodes: [x_proj, x_gate, mul, sub, mul_1, x_highway], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sub => sub
# x_gate => sigmoid
# x_highway => add
# x_proj => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
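# Note: this kernel fuses the whole highway combination elementwise: sigmoid on
# the gate pre-activation, ReLU on the projection pre-activation, then
# gate * relu(proj) + (1 - gate) * x in a single pass.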
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_proj, x_gate, mul, sub, mul_1, x_highway], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf1, buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.utils
class Highway(nn.Module):
def __init__(self, e_word):
""" Init Highway.
@param e_word (int): Output embedding size of target word.
"""
super(Highway, self).__init__()
self.proj_layer = nn.Linear(e_word, e_word)
self.gate_layer = nn.Linear(e_word, e_word)
self.ReLU = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x_conv_out):
""" Forward pass of Highway.
@param x_conv_out (Tensor): tensor from convolutional layer, shape (batch_size, e_word)
@returns x_highway (Tensor): output tensor after highway layer, shape (batch_size, e_word)
"""
x_proj = self.ReLU(self.proj_layer(x_conv_out))
x_gate = self.sigmoid(self.gate_layer(x_conv_out))
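        # Gated residual combination: the sigmoid gate interpolates between the
        # transformed path (x_proj) and the identity path (x_conv_out);
        # gate -> 1 keeps the projection, gate -> 0 passes the input through.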
x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out
return x_highway
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'e_word': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNew(nn.Module):
def __init__(self, e_word):
""" Init Highway.
@param e_word (int): Output embedding size of target word.
"""
super(HighwayNew, self).__init__()
self.proj_layer = nn.Linear(e_word, e_word)
self.gate_layer = nn.Linear(e_word, e_word)
self.ReLU = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.proj_layer.weight
primals_2 = self.proj_layer.bias
primals_4 = self.gate_layer.weight
primals_5 = self.gate_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
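# Minimal usage sketch (illustrative, not from the source repo): HighwayNew
# shares Highway's parameter layout, so the fused path can be sanity-checked
# against eager mode, assuming the eager Highway class from the record above
# is in scope:
#     ref, fused = Highway(4).cuda(), HighwayNew(4).cuda()
#     fused.load_state_dict(ref.state_dict())
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     assert torch.allclose(ref(x), fused(x), atol=1e-6)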
| KIONLEE/cs224n | Highway | false | 2,434 | [
"MIT"
] | 0 | 63054e187fb40d65af058673fe7aa2f22433da6e | https://github.com/KIONLEE/cs224n/tree/63054e187fb40d65af058673fe7aa2f22433da6e | import torch
import torch.nn as nn
import torch.nn.utils
class Model(nn.Module):
def __init__(self, e_word):
""" Init Highway.
@param e_word (int): Output embedding size of target word.
"""
super().__init__()
self.proj_layer = nn.Linear(e_word, e_word)
self.gate_layer = nn.Linear(e_word, e_word)
self.ReLU = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x_conv_out):
""" Forward pass of Highway.
@param x_conv_out (Tensor): tensor from convolutional layer, shape (batch_size, e_word)
@returns x_highway (Tensor): output tensor after highway layer, shape (batch_size, e_word)
"""
x_proj = self.ReLU(self.proj_layer(x_conv_out))
x_gate = self.sigmoid(self.gate_layer(x_conv_out))
x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out
return x_highway
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
TorchModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ku/ckukyw44hxxcrcpyqqe6auljaf54daimtcs6kbykg5nkqzpxqi7c.py
# Topologically Sorted Source Nodes: [tanh_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh_2 => tanh_2
# Graph fragment:
# %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (1, 4, 4, 4, 64), (4096, 1024, 256, 64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (1, 4, 4, 4, 64), (4096, 1024, 256, 64, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf4, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super(TorchLinearModule, self).__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class TorchModule(torch.nn.Module):
def __init__(self, in_size, out_size, dev=None, hidden_size=64):
super(TorchModule, self).__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, x):
x = x.unsqueeze(0)
x = torch.tanh(self._linear0(x))
x = torch.tanh(self._linear1(x))
return torch.tanh(self._linear2(x))[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
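# The kernel above fuses the Linear bias add with tanh in place: in_out_ptr0
# holds the matmul result x @ W.T, in_ptr0 the 64-element bias (x0 = xindex %
# 64 selects the feature column), and tanh(x + b) overwrites the buffer,
# saving one round trip through global memory.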
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (1, 4, 4, 4, 64), (4096, 1024, 256,
64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (1, 4, 4, 4, 64), (4096, 1024, 256,
64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, buf4, primals_6, primals_4
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super(TorchLinearModule, self).__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class TorchModuleNew(torch.nn.Module):
def __init__(self, in_size, out_size, dev=None, hidden_size=64):
super(TorchModuleNew, self).__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, input_0):
primals_2 = self._linear0._linear.weight
primals_3 = self._linear0._linear.bias
primals_4 = self._linear1._linear.weight
primals_5 = self._linear1._linear.bias
primals_6 = self._linear2._linear.weight
primals_7 = self._linear2._linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| JudeDavis1/ivy | TorchModule | false | 2,435 | [
"Apache-2.0"
] | 0 | 0f3dc38f978a6ce65fc1ed11110338d635e5c9f3 | https://github.com/JudeDavis1/ivy/tree/0f3dc38f978a6ce65fc1ed11110338d635e5c9f3 | import torch
import torch.nn
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class Model(torch.nn.Module):
def __init__(self, in_size, out_size, dev=None, hidden_size=64):
super().__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, x):
x = x.unsqueeze(0)
x = torch.tanh(self._linear0(x))
x = torch.tanh(self._linear1(x))
return torch.tanh(self._linear2(x))[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ShakeResNeXt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# h => convolution
# h_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
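# As in the Highway record earlier, this pointwise kernel is the epilogue of an
# extern convolution: x1 = (xindex // 4096) % 64 indexes the 64 output channels
# of the 64x64 feature map, so the kernel adds the per-channel bias and applies
# ReLU in place on the convolution output.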
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.avg_pool2d]
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0, 0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024), (1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return (buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16, 1024), (1024, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
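# Note on the autograd function above: the forward pass mixes the two branches
# with a per-sample random alpha, while backward re-draws an independent beta,
# so gradients are shaken separately from activations (shake-shake
# regularization, Gastaldi 2017); at eval time a fixed 0.5 simply averages the
# branches. Caveat: alpha/beta are allocated on CPU, so training on CUDA
# inputs would need them moved to x1.device first.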
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
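# The second branch above pads with (-1, 1, -1, 1), i.e. it crops one row and
# column and shifts the map by one pixel before the strided average pool;
# concatenating the two half-width 1x1 convolutions then recovers out_ch
# channels while sampling the odd-offset pixels a single strided conv would
# skip.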
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
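# With the test configuration depth=1, n_units = (1 - 2) // 9 == -1 and
# range(-1) is empty, so all three layers are empty nn.Sequential blocks; that
# is why the compiled graph above reduces to conv -> relu -> avg_pool2d ->
# linear only.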
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'cardinary': 4, 'label': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024),
(1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16,
1024), (1024, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXtNew(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXtNew, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Josie-Li/ZazuML-easy_AutoML | ShakeResNeXt | false | 2,436 | [
"MIT"
] | 0 | e4daabaab9df518c35abdba35a67607d002bee33 | https://github.com/Josie-Li/ZazuML-easy_AutoML/tree/e4daabaab9df518c35abdba35a67607d002bee33 | import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super().__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super().__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class Model(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super().__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
        mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
# ... truncated (>4000 chars) for memory efficiency |
MSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ox/coxh4zxlwehvc7dn52ckdepn62ln2jmlbsk5mwd7tp7xur2tywk6.py
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MSE(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss(reduction='mean')
def forward(self, recon, target):
return self.loss(recon, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
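# The reduction kernel above computes mean((recon - target) ** 2) over the
# whole 4x4x4x4 input in a single program: one 256-lane block squares the
# elementwise difference, tree-sums it, and divides by 256.0, matching
# nn.MSELoss(reduction='mean').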
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSENew(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss(reduction='mean')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| KMU-AELAB/Active_Learning | MSE | false | 2,437 | [
"MIT"
] | 0 | bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | https://github.com/KMU-AELAB/Active_Learning/tree/bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss(reduction='mean')
def forward(self, recon, target):
return self.loss(recon, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ShakeResNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# h => convolution
# h_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.avg_pool2d]
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0, 0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4)
del primals_5
return (buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256, 16), (16, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
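# Each branch built above is a pre-activation pair of 3x3 convolutions
# (ReLU -> conv -> BN, twice); ShakeBlock.forward then blends the two branches
# with ShakeShake.apply and adds the (possibly downsampled) shortcut.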
class ShakeResNet(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNet, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
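# As in the ResNeXt record above, depth=1 gives int((1 - 2) / 6) == 0 units,
# so every layer is an empty nn.Sequential and the traced graph keeps only
# c_in, the final ReLU/avg-pool, and fc_out.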
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'label': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256,
16), (16, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
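# Editor's note (assumption, not in the original source): the shortcut
# halves the channel count through two parallel 1x1 convolutions, one on
# the strided input and one on a copy shifted by a single pixel via
# F.pad(h, (-1, 1, -1, 1)), then concatenates them, so a strided shortcut
# keeps information from both pixel phases.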
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        # The original `self.equal_io and None or Shortcut(...)` idiom
        # always built a Shortcut, even for identity shortcuts; a
        # conditional expression avoids creating unused parameters.
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
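# Editor's note: each branch is a pre-activation stack
# (ReLU, 3x3 Conv, BN, ReLU, 3x3 Conv, BN); ShakeBlock then mixes the two
# branch outputs with random weights instead of summing a single residual
# branch.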
class ShakeResNetNew(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNetNew, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Josie-Li/ZazuML-easy_AutoML | ShakeResNet | false | 2,438 | [
"MIT"
] | 0 | e4daabaab9df518c35abdba35a67607d002bee33 | https://github.com/Josie-Li/ZazuML-easy_AutoML/tree/e4daabaab9df518c35abdba35a67607d002bee33 | import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super().__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super().__init__()
self.equal_io = in_ch == out_ch
        # The original `self.equal_io and None or Shortcut(...)` idiom
        # always built a Shortcut, even for identity shortcuts; a
        # conditional expression avoids creating unused parameters.
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class Model(nn.Module):
def __init__(self, depth, w_base, label):
super().__init__()
n_units = (depth - 2) / 6
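        # Editor's note: this assumes depth = 6*n + 2 (e.g. 26); the test
        # config below passes depth=1, so int(n_units) == 0 and every stage
        # becomes an empty nn.Sequential.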
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [1, 4, 4]
|
PatchEmbed | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qy/cqyu5l2p6xh633a7thd2tte3bszrg4ugscf2y523iookhmpheqal.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (768*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4c/c4ckui43udehobca2kb3vy5stpaqfztmtjwrdinx2dhmcmh73fmo.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [16, 16], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (12288*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_3, (768, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 2304, 256, grid=grid(2304, 256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, primals_3, buf3, 3072, 16, grid=grid(3072, 16), stream=stream0)
del buf2
del primals_3
return (reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, 3, 16, 16), (768, 256, 16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
_B, _C, _H, _W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
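# Editor's note: worked shape example for the defaults above. The 64x64
# test input below gives proj: (B, 3, 64, 64) -> (B, 768, 4, 4);
# flatten(2) yields (B, 768, 16) and transpose(1, 2) yields (B, 16, 768),
# i.e. (64 // 16) ** 2 = 16 patch tokens of dimension 768 per image.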
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
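    # Editor's note: relayout kernel; copies the contiguous NCHW input
    # (4, 3, 64, 64) into a channels-last buffer with strides
    # (12288, 1, 192, 3) so the convolution below runs in NHWC layout.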
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_3, (768,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.
float32)
triton_poi_fused_1[grid(2304, 256)](primals_2, buf1, 2304, 256,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_3,
buf3, 3072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del buf2
del primals_3
return reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0
), buf0, buf1
class PatchEmbedNew(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, input_0):
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| IgoshinLab/dino | PatchEmbed | false | 2,439 | [
"Apache-2.0"
] | 0 | 00abaabd8ad2f4edc414a44166a24211dfb75900 | https://github.com/IgoshinLab/dino/tree/00abaabd8ad2f4edc414a44166a24211dfb75900 | import torch
from torch import nn
class Model(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
_B, _C, _H, _W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [xg], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xg => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mt/cmttmov7q7l6eww5wgel4xbdmlbbf53sgwydh2ovfk4ks65mt3ki.py
# Topologically Sorted Source Nodes: [a1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# a1 => relu
# Graph fragment:
# %add_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_5, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_5,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py
# Topologically Sorted Source Nodes: [u_xy_vel], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# u_xy_vel => sigmoid
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_8), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pr/cprnrofz4rdfkl7dxsmpvmhufuwz34j47yrt7z5sjnmwebkp6lwf.py
# Topologically Sorted Source Nodes: [u_xy_dir], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# u_xy_dir => tanh
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_10), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gd/cgdjwj44ajb6xlwswax32rqljgkqktxl3q54ajtnidhrnoa2h3tu.py
# Topologically Sorted Source Nodes: [u_r_vel], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# u_r_vel => tanh_1
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_12), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (64, 8), (8, 1))
assert_size_stride(primals_4, (64, ), (1, ))
assert_size_stride(primals_5, (64, 64), (64, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (1, 64), (64, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (2, 64), (64, 1))
assert_size_stride(primals_10, (2, ), (1, ))
assert_size_stride(primals_11, (1, 64), (64, 1))
assert_size_stride(primals_12, (1, ), (1, ))
assert_size_stride(primals_13, (1, 64), (64, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [xg], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 64), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [a1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (64, 64), (1, 64), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [a2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf4, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (64, 1), (1, 64), 0), out=buf5)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [u_xy_vel], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf6, primals_8, 4, grid=grid(4), stream=stream0)
del primals_8
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_9, (64, 2), (1, 64), 0), out=buf7)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [u_xy_dir], Original ATen: [aten.tanh]
triton_poi_fused_tanh_3.run(buf8, primals_10, 8, grid=grid(8), stream=stream0)
del primals_10
buf9 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_11, (64, 1), (1, 64), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [u_r_vel], Original ATen: [aten.tanh]
triton_poi_fused_tanh_4.run(buf10, primals_12, 4, grid=grid(4), stream=stream0)
del primals_12
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_13, (64, 1), (1, 64), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [u_dur], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf12, primals_14, 4, grid=grid(4), stream=stream0)
del primals_14
return (buf6, buf8, buf10, buf12, buf0, buf2, buf4, buf6, buf8, buf10, buf12, primals_13, primals_11, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.autograd import Variable
class Policy(torch.nn.Module):
def __init__(self, x_size, g_size, u_size, hidden_size=64):
super(Policy, self).__init__()
self.fc1 = torch.nn.Linear(x_size + g_size, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
self.fc_xy_vel = torch.nn.Linear(hidden_size, 1)
self.fc_xy_dir = torch.nn.Linear(hidden_size, 2)
self.fc_r_vel = torch.nn.Linear(hidden_size, 1)
self.fc_dur = torch.nn.Linear(hidden_size, 1)
self.register_buffer('xy_vel_scale', torch.FloatTensor([0.6]))
self.register_buffer('r_vel_scale', torch.FloatTensor([1.4]))
self.register_buffer('dur_scale', torch.FloatTensor([0.6]))
def forward(self, x, g):
xg = torch.cat([x, g], dim=1)
a1 = F.relu(self.fc1(xg))
a2 = F.relu(self.fc2(a1))
u_xy_vel = F.sigmoid(self.fc_xy_vel(a2))
u_xy_dir = F.tanh(self.fc_xy_dir(a2))
u_r_vel = F.tanh(self.fc_r_vel(a2))
u_dur = F.sigmoid(self.fc_dur(a2))
return torch.cat([Variable(self.xy_vel_scale) * u_xy_vel * u_xy_dir /
u_xy_dir.norm(dim=1, keepdim=True), Variable(self.r_vel_scale) *
u_r_vel, Variable(self.dur_scale) * u_dur], dim=1)
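# Editor's note: the action vector has four components per row: a unit xy
# direction scaled by a speed in (0, 0.6), a rotational velocity in
# (-1.4, 1.4) and a duration in (0, 0.6), matching the registered scale
# buffers above.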
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'x_size': 4, 'g_size': 4, 'u_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
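    # Editor's note: fuses torch.cat([x, g], dim=1); the first four columns
    # of each output row come from in_ptr0 (x), the last four from
    # in_ptr1 (g), producing a (4, 8) buffer.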
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (64, 8), (8, 1))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64, 64), (64, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (1, 64), (64, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (2, 64), (64, 1))
assert_size_stride(primals_10, (2,), (1,))
assert_size_stride(primals_11, (1, 64), (64, 1))
assert_size_stride(primals_12, (1,), (1,))
assert_size_stride(primals_13, (1, 64), (64, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 64), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (64, 64), (1,
64), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(256)](buf4, primals_6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (64, 1), (1,
64), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_sigmoid_2[grid(4)](buf6, primals_8, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_8
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_9, (64, 2), (1,
64), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_tanh_3[grid(8)](buf8, primals_10, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_11, (64, 1), (1,
64), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_tanh_4[grid(4)](buf10, primals_12, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_12
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_13, (64, 1), (1,
64), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_sigmoid_2[grid(4)](buf12, primals_14, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_14
return (buf6, buf8, buf10, buf12, buf0, buf2, buf4, buf6, buf8, buf10,
buf12, primals_13, primals_11, primals_9, primals_7, primals_5)
class PolicyNew(torch.nn.Module):
def __init__(self, x_size, g_size, u_size, hidden_size=64):
super(PolicyNew, self).__init__()
self.fc1 = torch.nn.Linear(x_size + g_size, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
self.fc_xy_vel = torch.nn.Linear(hidden_size, 1)
self.fc_xy_dir = torch.nn.Linear(hidden_size, 2)
self.fc_r_vel = torch.nn.Linear(hidden_size, 1)
self.fc_dur = torch.nn.Linear(hidden_size, 1)
self.register_buffer('xy_vel_scale', torch.FloatTensor([0.6]))
self.register_buffer('r_vel_scale', torch.FloatTensor([1.4]))
self.register_buffer('dur_scale', torch.FloatTensor([0.6]))
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc_xy_vel.weight
primals_8 = self.fc_xy_vel.bias
primals_9 = self.fc_xy_dir.weight
primals_10 = self.fc_xy_dir.bias
primals_11 = self.fc_r_vel.weight
primals_12 = self.fc_r_vel.bias
primals_13 = self.fc_dur.weight
primals_14 = self.fc_dur.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| JoshuaHaustein/oracle_server | Policy | false | 2,440 | [
"BSD-3-Clause"
] | 0 | 9dc54cd03e28eee6d546b811ce32bcc4d16cec0c | https://github.com/JoshuaHaustein/oracle_server/tree/9dc54cd03e28eee6d546b811ce32bcc4d16cec0c | import torch
import torch.nn.functional as F
from torch.autograd import Variable
class Model(torch.nn.Module):
def __init__(self, x_size, g_size, u_size, hidden_size=64):
super().__init__()
self.fc1 = torch.nn.Linear(x_size + g_size, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
self.fc_xy_vel = torch.nn.Linear(hidden_size, 1)
self.fc_xy_dir = torch.nn.Linear(hidden_size, 2)
self.fc_r_vel = torch.nn.Linear(hidden_size, 1)
self.fc_dur = torch.nn.Linear(hidden_size, 1)
self.register_buffer('xy_vel_scale', torch.FloatTensor([0.6]))
self.register_buffer('r_vel_scale', torch.FloatTensor([1.4]))
self.register_buffer('dur_scale', torch.FloatTensor([0.6]))
def forward(self, x, g):
xg = torch.cat([x, g], dim=1)
a1 = F.relu(self.fc1(xg))
a2 = F.relu(self.fc2(a1))
u_xy_vel = F.sigmoid(self.fc_xy_vel(a2))
u_xy_dir = F.tanh(self.fc_xy_dir(a2))
u_r_vel = F.tanh(self.fc_r_vel(a2))
u_dur = F.sigmoid(self.fc_dur(a2))
return torch.cat([Variable(self.xy_vel_scale) * u_xy_vel * u_xy_dir /
u_xy_dir.norm(dim=1, keepdim=True), Variable(self.r_vel_scale) *
u_r_vel, Variable(self.dur_scale) * u_dur], dim=1)
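# Editor's note (illustrative sketch, not in the original source): a quick
# smoke test of the interface assumed by get_inputs/get_init_inputs below.
def _policy_demo():
    policy = Model(x_size=4, g_size=4, u_size=4)
    u = policy(torch.rand(4, 4), torch.rand(4, 4))
    # columns: [speed * dir_x, speed * dir_y, rot_velocity, duration]
    assert u.shape == (4, 4)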
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pm/cpm4jmajpichlwvu5d2eexd6tfegyooy23ydo24lqoun6fhgxiyo.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
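    # Editor's note: fuses the conv bias add with ReLU and also emits the
    # (output <= 0) mask that the ReLU backward pass consumes.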
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ix/cixbpgq3us3txgxynaezdx3dbimmut6c662iwdx2laapc3c7af5n.py
# Topologically Sorted Source Nodes: [adaptive_max_pool1d], Original ATen: [aten.adaptive_max_pool2d]
# Source node to ATen node mapping:
# adaptive_max_pool1d => adaptive_max_pool2d, getitem_1
# Graph fragment:
# %adaptive_max_pool2d : [num_users=2] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%unsqueeze_1, [1, 1]), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%adaptive_max_pool2d, 1), kwargs = {})
triton_poi_fused_adaptive_max_pool2d_1 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
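    # Editor's note: adaptive max-pool over a length-2 row; tmp6 is the max
    # of the two values and tmp16 reconstructs its flat index for the
    # backward pass.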
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp7 = tl.full([1], 2, tl.int32)
tmp8 = tl.where((tmp5 < 0) != (tmp7 < 0), tl.where(tmp5 % tmp7 != 0, tmp5 // tmp7 - 1, tmp5 // tmp7), tmp5 // tmp7)
tmp9 = tmp8 * tmp7
tmp10 = tmp5 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = tmp11 + tmp8
tmp13 = tmp11 + tmp10
tmp14 = tl.full([1], 2, tl.int64)
tmp15 = tmp12 * tmp14
tmp16 = tmp15 + tmp13
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 5), (20, 5, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 2), (8, 2, 1))
buf1 = reinterpret_tensor(buf0, (4, 2), (2, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 8, grid=grid(8), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.int64)
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [adaptive_max_pool1d], Original ATen: [aten.adaptive_max_pool2d]
triton_poi_fused_adaptive_max_pool2d_1.run(buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
return (reinterpret_tensor(buf3, (4, ), (1, ), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 2), (2, 2, 1), 0), buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 5), (20, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.utils
class CNN(nn.Module):
def __init__(self, e_char, e_word):
""" Init CNN.
        @param e_char (int): Embedding size of each input character.
        @param e_word (int): Output embedding size of target word.
"""
super(CNN, self).__init__()
self.conv = nn.Conv1d(in_channels=e_char, out_channels=e_word,
kernel_size=5, padding=1)
self.ReLU = nn.ReLU()
self.maxpool = nn.AdaptiveMaxPool1d(output_size=1)
def forward(self, x_reshaped):
""" Forward pass of CNN.
@param x_reshaped (Tensor): tensor after padding and embedding lookup, shape (src_len * batch_size, e_char, m_word)
        @returns x_conv_out (Tensor): output tensor to be fed into the highway layer, shape (src_len * batch_size, e_word)
"""
relu = self.ReLU(self.conv(x_reshaped))
x_conv_out = self.maxpool(relu).squeeze(-1)
return x_conv_out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'e_char': 4, 'e_word': 4}]
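# Minimal eager-mode usage sketch. The shapes and the `_example_usage` helper
# are illustrative assumptions, not part of the original module.
def _example_usage():
    model = CNN(e_char=4, e_word=4)
    x_reshaped = torch.rand(2, 4, 10)  # (src_len * batch_size, e_char, m_word)
    x_conv_out = model(x_reshaped)     # conv (L_out=8) -> ReLU -> max over L
    assert x_conv_out.shape == (2, 4)  # (src_len * batch_size, e_word)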
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp7 = tl.full([1], 2, tl.int32)
tmp8 = tl.where((tmp5 < 0) != (tmp7 < 0), tl.where(tmp5 % tmp7 != 0,
tmp5 // tmp7 - 1, tmp5 // tmp7), tmp5 // tmp7)
tmp9 = tmp8 * tmp7
tmp10 = tmp5 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = tmp11 + tmp8
tmp13 = tmp11 + tmp10
tmp14 = tl.full([1], 2, tl.int64)
tmp15 = tmp12 * tmp14
tmp16 = tmp15 + tmp13
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 5), (20, 5, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 2), (8, 2, 1))
buf1 = reinterpret_tensor(buf0, (4, 2), (2, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8)](buf1, primals_2,
buf4, 8, XBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.int64)
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_adaptive_max_pool2d_1[grid(4)](buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf3, (4,), (1,), 0), primals_1,
        reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf1, (4, 1, 2), (2, 2, 1), 0), buf2, buf4)
class CNNNew(nn.Module):
def __init__(self, e_char, e_word):
""" Init CNN.
        @param e_char (int): Embedding size of each input character.
        @param e_word (int): Output embedding size of target word.
"""
super(CNNNew, self).__init__()
self.conv = nn.Conv1d(in_channels=e_char, out_channels=e_word,
kernel_size=5, padding=1)
self.ReLU = nn.ReLU()
self.maxpool = nn.AdaptiveMaxPool1d(output_size=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| KIONLEE/cs224n | CNN | false | 2,441 | [
"MIT"
] | 0 | 63054e187fb40d65af058673fe7aa2f22433da6e | https://github.com/KIONLEE/cs224n/tree/63054e187fb40d65af058673fe7aa2f22433da6e | import torch
import torch.nn as nn
import torch.nn.utils
class Model(nn.Module):
def __init__(self, e_char, e_word):
""" Init CNN.
        @param e_char (int): Embedding size of each input character.
        @param e_word (int): Output embedding size of target word.
"""
super().__init__()
self.conv = nn.Conv1d(in_channels=e_char, out_channels=e_word,
kernel_size=5, padding=1)
self.ReLU = nn.ReLU()
self.maxpool = nn.AdaptiveMaxPool1d(output_size=1)
def forward(self, x_reshaped):
""" Forward pass of CNN.
@param x_reshaped (Tensor): tensor after padding and embedding lookup, shape (src_len * batch_size, e_char, m_word)
        @returns x_conv_out (Tensor): output tensor to be fed into the highway layer, shape (src_len * batch_size, e_word)
"""
relu = self.ReLU(self.conv(x_reshaped))
x_conv_out = self.maxpool(relu).squeeze(-1)
return x_conv_out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
diag_offdiag_maxpool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/z4/cz46z3dq5qodaimel7dv62vekmpk4ksq7th54olssudfutixvgvi.py
# Topologically Sorted Source Nodes: [max_1, max_val], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# max_val => max_2
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%diagonal, 2), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%getitem,), kwargs = {})
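# The loads at offsets 0, 5, 10 and 15 walk the main diagonal of each 4x4
# matrix (stride 16 between matrices, stride 5 along the diagonal), so the
# per-matrix diagonal max and the global max over all 16 matrices are fused
# into a single reduction pass.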
triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (16*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + (16*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (10 + (16*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + (16*r0)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = triton_helpers.max2(tmp7, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/an/can3q5iicacaiarr3s3lhazp34y4o7vrl4aujiyo4cuyuo56ndle.py
# Topologically Sorted Source Nodes: [mul, min_val], Original ATen: [aten.mul, aten.max]
# Source node to ATen node mapping:
# min_val => max_3
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1), kwargs = {})
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%mul,), kwargs = {})
triton_per_fused_max_mul_1 = async_compile.triton('triton_per_fused_max_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_mul_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_mul_1(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iw/ciwzrjqk4lzlt6b4l3t5t2q37pukslnoccrywczshjiufk5kfpky.py
# Topologically Sorted Source Nodes: [sub, max_4], Original ATen: [aten.sub, aten.max]
# Source node to ATen node mapping:
# max_4 => max_4
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %unsqueeze_3), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%sub, 2), kwargs = {})
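# in_ptr1 and in_ptr2 hold the scalars max_val and min_val; their absolute
# sum reproduces `val` from the eager code, the diagonal-embedding term is
# multiplied by zero exactly as written there, and the subtraction plus the
# max over dim 2 are all folded into one pointwise kernel.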
triton_poi_fused_max_sub_2 = async_compile.triton('triton_poi_fused_max_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (0))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (0))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp16 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp28 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp37 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp6 * tmp5
tmp12 = tmp9 + tmp11
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp7 + tmp13
tmp15 = tmp0 - tmp14
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp17 == tmp2
tmp20 = tl.where(tmp18, tmp19, tmp5)
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp13
tmp23 = tmp16 - tmp22
tmp24 = triton_helpers.maximum(tmp15, tmp23)
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp26 == tmp2
tmp29 = tl.where(tmp27, tmp28, tmp5)
tmp30 = tmp29 * tmp5
tmp31 = tmp30 + tmp13
tmp32 = tmp25 - tmp31
tmp33 = triton_helpers.maximum(tmp24, tmp32)
tmp35 = tl.full([1], 3, tl.int64)
tmp36 = tmp35 == tmp2
tmp38 = tl.where(tmp36, tmp37, tmp5)
tmp39 = tmp38 * tmp5
tmp40 = tmp39 + tmp13
tmp41 = tmp34 - tmp40
tmp42 = triton_helpers.maximum(tmp33, tmp41)
tl.store(out_ptr0 + (x2), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3c/c3cddeoyu6jmox52voje7xldnnecnaowb53oavkcdosdyg2rjyck.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_4], 1), kwargs = {})
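# The first branch recomputes max_diag directly from the diagonal loads
# (offsets 0, 5, 10, 15); the second reduces buf2 over its last dimension for
# max_offdiag, so both halves of the concatenated (4, 8) output are written
# by a single kernel.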
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((16*x0) + (64*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (5 + (16*x0) + (64*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (10 + (16*x0) + (64*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (15 + (16*x0) + (64*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr1 + ((4*((-4) + x0)) + (16*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (1 + (4*((-4) + x0)) + (16*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tl.load(in_ptr1 + (2 + (4*((-4) + x0)) + (16*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr1 + (3 + (4*((-4) + x0)) + (16*x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1, max_val], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_per_fused_max_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul, min_val], Original ATen: [aten.mul, aten.max]
triton_per_fused_max_mul_1.run(arg0_1, buf1, 1, 256, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, max_4], Original ATen: [aten.sub, aten.max]
triton_poi_fused_max_sub_2.run(arg0_1, buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(arg0_1, buf2, buf3, 32, grid=grid(32), stream=stream0)
del arg0_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class diag_offdiag_maxpool(torch.nn.Module):
"""diag_offdiag_maxpool"""
def __init__(self):
super(diag_offdiag_maxpool, self).__init__()
def forward(self, inputs):
        max_diag = torch.max(torch.diagonal(inputs, dim1=-2, dim2=-1), dim=2)[0]
max_val = torch.max(max_diag)
min_val = torch.max(torch.mul(inputs, -1))
val = torch.abs(max_val + min_val)
min_mat = torch.unsqueeze(torch.unsqueeze(torch.diagonal(torch.add(
torch.mul(torch.diag_embed(inputs[0][0]), 0), val)), dim=0), dim=0)
        max_offdiag = torch.max(torch.max(torch.sub(inputs, min_mat),
            dim=2)[0], dim=2)[0]
return torch.cat((max_diag, max_offdiag), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
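# Minimal usage sketch, assuming square (n x n) feature maps per channel; the
# `_example_usage` helper is illustrative and not part of the original module.
def _example_usage():
    pool = diag_offdiag_maxpool()
    x = torch.rand(4, 4, 4, 4)  # (batch, channels, n, n)
    out = pool(x)
    assert out.shape == (4, 8)  # max_diag and max_offdiag, concatenated on dim 1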
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel,
        XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy='evict_last'
)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = triton_helpers.max2(tmp7, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
@triton.jit
def triton_per_fused_max_mul_1(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_max_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp16 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp6 * tmp5
tmp12 = tmp9 + tmp11
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp7 + tmp13
tmp15 = tmp0 - tmp14
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp17 == tmp2
tmp20 = tl.where(tmp18, tmp19, tmp5)
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp13
tmp23 = tmp16 - tmp22
tmp24 = triton_helpers.maximum(tmp15, tmp23)
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp26 == tmp2
tmp29 = tl.where(tmp27, tmp28, tmp5)
tmp30 = tmp29 * tmp5
tmp31 = tmp30 + tmp13
tmp32 = tmp25 - tmp31
tmp33 = triton_helpers.maximum(tmp24, tmp32)
tmp35 = tl.full([1], 3, tl.int64)
tmp36 = tmp35 == tmp2
tmp38 = tl.where(tmp36, tmp37, tmp5)
tmp39 = tmp38 * tmp5
tmp40 = tmp39 + tmp13
tmp41 = tmp34 - tmp40
tmp42 = triton_helpers.maximum(tmp33, tmp41)
tl.store(out_ptr0 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 * x0 + 64 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (5 + 16 * x0 + 64 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (10 + 16 * x0 + 64 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (15 + 16 * x0 + 64 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr1 + (4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (1 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tl.load(in_ptr1 + (2 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr1 + (3 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_max_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_max_mul_1[grid(1)](arg0_1, buf1, 1, 256, num_warps
=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_max_sub_2[grid(64)](arg0_1, buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_3[grid(32)](arg0_1, buf2, buf3, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del arg0_1
del buf2
return buf3,
class diag_offdiag_maxpoolNew(torch.nn.Module):
"""diag_offdiag_maxpool"""
def __init__(self):
super(diag_offdiag_maxpoolNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JoshuaMitton/InvariantGraphNetworks | diag_offdiag_maxpool | false | 2,442 | [
"Apache-2.0"
] | 0 | f6d8f43c7a053425eee785d11c5de91ac50f367c | https://github.com/JoshuaMitton/InvariantGraphNetworks/tree/f6d8f43c7a053425eee785d11c5de91ac50f367c | import torch
class Model(torch.nn.Module):
"""diag_offdiag_maxpool"""
def __init__(self):
super().__init__()
def forward(self, inputs):
        max_diag = torch.max(torch.diagonal(inputs, dim1=-2, dim2=-1), dim=2)[0]
max_val = torch.max(max_diag)
min_val = torch.max(torch.mul(inputs, -1))
val = torch.abs(max_val + min_val)
min_mat = torch.unsqueeze(torch.unsqueeze(torch.diagonal(torch.add(
torch.mul(torch.diag_embed(inputs[0][0]), 0), val)), dim=0), dim=0)
        max_offdiag = torch.max(torch.max(torch.sub(inputs, min_mat),
            dim=2)[0], dim=2)[0]
return torch.cat((max_diag, max_offdiag), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LastLevelMaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7x/c7xzocag6hze7uuiyz32ow2ikcanvueomksqpljyhexuxldxtjgh.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
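# With a 1x1 window and stride 2 the pool never compares neighbouring values,
# so the kernel degenerates to a strided gather: one load per output element,
# picking every other row and column of the input.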
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class LastLevelMaxPool(nn.Module):
def forward(self, x):
return [F.max_pool2d(x, 1, 2, 0)]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
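# Minimal usage sketch with hypothetical FPN-style shapes; kernel size 1 with
# stride 2 simply halves the spatial resolution.
def _example_usage():
    pool = LastLevelMaxPool()
    x = torch.rand(1, 256, 32, 32)
    p6, = pool(x)  # forward returns a single-element list
    assert p6.shape == (1, 256, 16, 16)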
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class LastLevelMaxPoolNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AgatheMo/maskscoring_rcnn-1 | LastLevelMaxPool | false | 2,443 | [
"MIT"
] | 0 | ed6349caa94c2e23c971784c8aeeafc9f85cde63 | https://github.com/AgatheMo/maskscoring_rcnn-1/tree/ed6349caa94c2e23c971784c8aeeafc9f85cde63 | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, x):
return [F.max_pool2d(x, 1, 2, 0)]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
RankingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sb/csb3yn2vfnide3womhvmhsqxjqxl375rkxhqvgtsa3dhf6oka6pp.py
# Topologically Sorted Source Nodes: [clamp, ones, binary_cross_entropy, pred_loss_1], Original ATen: [aten.clamp, aten.sign, aten.binary_cross_entropy, aten.sigmoid]
# Source node to ATen node mapping:
# binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub_2, sub_3
# clamp => clamp_min
# ones => sign
# pred_loss_1 => sigmoid
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%slice_1, 0), kwargs = {})
# %sign : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%clamp_min,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sign, 1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_2,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sigmoid,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, %maximum_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
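# sign(clamp(diff, 0)) gives hard 0/1 targets, so the fused kernel evaluates
# BCE directly as (target - 1) * log1p(-p) - target * log(p) with both logs
# clamped at -100, matching aten.binary_cross_entropy, then averages over the
# 128 retained pair elements.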
triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr0 + (192 + r0 + ((-64)*r1)), None)
tmp14 = tl.load(in_ptr1 + (r2), None)
tmp15 = tl.load(in_ptr1 + (192 + r0 + ((-64)*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = -tmp17
tmp19 = libdevice.log1p(tmp18)
tmp20 = -100.0
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tmp13 * tmp21
tmp23 = tl_math.log(tmp17)
tmp24 = triton_helpers.maximum(tmp23, tmp20)
tmp25 = tmp11 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp30 = 128.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp31, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [clamp, ones, binary_cross_entropy, pred_loss_1], Original ATen: [aten.clamp, aten.sign, aten.binary_cross_entropy, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0.run(buf1, arg0_1, arg1_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RankingLoss(nn.Module):
def __init__(self):
super().__init__()
self.bce = nn.BCELoss()
def forward(self, pred_loss, target_loss):
target = (target_loss - target_loss.flip(0))[:target_loss.size(0) // 2]
target = target.detach()
ones = torch.sign(torch.clamp(target, min=0))
pred_loss = (pred_loss - pred_loss.flip(0))[:pred_loss.size(0) // 2]
pred_loss = torch.sigmoid(pred_loss)
return self.bce(pred_loss, ones)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
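# Minimal usage sketch, assuming an even batch size so the flip-and-halve
# pairing is well defined; `_example_usage` is an illustrative helper only.
def _example_usage():
    criterion = RankingLoss()
    pred_loss = torch.rand(8, 1)
    target_loss = torch.rand(8, 1)
    loss = criterion(pred_loss, target_loss)  # scalar BCE over the pairs
    assert loss.dim() == 0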
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = rindex // 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr0 + (192 + r0 + -64 * r1), None)
tmp14 = tl.load(in_ptr1 + r2, None)
tmp15 = tl.load(in_ptr1 + (192 + r0 + -64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = -tmp17
tmp19 = libdevice.log1p(tmp18)
tmp20 = -100.0
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tmp13 * tmp21
tmp23 = tl_math.log(tmp17)
tmp24 = triton_helpers.maximum(tmp23, tmp20)
tmp25 = tmp11 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp30 = 128.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0[grid(1)](
buf1, arg0_1, arg1_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RankingLossNew(nn.Module):
def __init__(self):
super().__init__()
self.bce = nn.BCELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| KMU-AELAB/Active_Learning | RankingLoss | false | 2,444 | [
"MIT"
] | 0 | bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | https://github.com/KMU-AELAB/Active_Learning/tree/bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.bce = nn.BCELoss()
def forward(self, pred_loss, target_loss):
target = (target_loss - target_loss.flip(0))[:target_loss.size(0) // 2]
target = target.detach()
ones = torch.sign(torch.clamp(target, min=0))
pred_loss = (pred_loss - pred_loss.flip(0))[:pred_loss.size(0) // 2]
pred_loss = torch.sigmoid(pred_loss)
return self.bce(pred_loss, ones)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
WordPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dp/cdpfrqskwwuqnfeupok3qgc45wzitvxhdnpcf5uabibiblorlnoa.py
# Topologically Sorted Source Nodes: [hidden, mean_hidden, max_1, add], Original ATen: [aten.relu, aten.mean, aten.max, aten.add]
# Source node to ATen node mapping:
# add => add
# hidden => relu
# max_1 => max_1
# mean_hidden => mean
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu, [0]), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%relu, 0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %getitem), kwargs = {})
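# relu(linear(x)) is read once per element, after which the mean over dim 0
# and the (value, argmax) max over dim 0 are both reduced in this kernel, so
# `add` receives mean_hidden + max_hidden without extra intermediate buffers.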
triton_poi_fused_add_max_mean_relu_0 = async_compile.triton('triton_poi_fused_add_max_mean_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_mean_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_mean_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp23 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp40 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 > tmp7
tmp9 = tmp4 == tmp7
tmp10 = tmp4 != tmp4
tmp11 = tmp7 != tmp7
tmp12 = tmp10 > tmp11
tmp13 = tmp8 | tmp12
tmp14 = tmp10 & tmp11
tmp15 = tmp9 | tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp16 < tmp17
tmp19 = tmp15 & tmp18
tmp20 = tmp13 | tmp19
tmp21 = tl.where(tmp20, tmp4, tmp7)
tmp22 = tl.where(tmp20, tmp16, tmp17)
tmp24 = tmp23 + tmp1
tmp25 = triton_helpers.maximum(tmp3, tmp24)
tmp26 = tmp21 > tmp25
tmp27 = tmp21 == tmp25
tmp28 = tmp21 != tmp21
tmp29 = tmp25 != tmp25
tmp30 = tmp28 > tmp29
tmp31 = tmp26 | tmp30
tmp32 = tmp28 & tmp29
tmp33 = tmp27 | tmp32
tmp34 = tl.full([1], 2, tl.int64)
tmp35 = tmp22 < tmp34
tmp36 = tmp33 & tmp35
tmp37 = tmp31 | tmp36
tmp38 = tl.where(tmp37, tmp21, tmp25)
tmp39 = tl.where(tmp37, tmp22, tmp34)
tmp41 = tmp40 + tmp1
tmp42 = triton_helpers.maximum(tmp3, tmp41)
tmp43 = tmp38 > tmp42
tmp44 = tmp38 == tmp42
tmp45 = tmp38 != tmp38
tmp46 = tmp42 != tmp42
tmp47 = tmp45 > tmp46
tmp48 = tmp43 | tmp47
tmp49 = tmp45 & tmp46
tmp50 = tmp44 | tmp49
tmp51 = tl.full([1], 3, tl.int64)
tmp52 = tmp39 < tmp51
tmp53 = tmp50 & tmp52
tmp54 = tmp48 | tmp53
tmp55 = tl.where(tmp54, tmp38, tmp42)
tmp56 = tl.where(tmp54, tmp39, tmp51)
tmp57 = tmp4 + tmp7
tmp58 = tmp57 + tmp25
tmp59 = tmp58 + tmp42
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tmp62 = triton_helpers.maximum(tmp4, tmp7)
tmp63 = triton_helpers.maximum(tmp62, tmp25)
tmp64 = triton_helpers.maximum(tmp63, tmp42)
tmp65 = tmp61 + tmp64
tl.store(out_ptr0 + (x2), tmp56, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
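# --- Editor note (hedged addition, not Inductor output): the fused kernel
# above is numerically equivalent to the eager-mode reduction sketched below;
# the long tmp8..tmp56 comparison chain reproduces torch.max's NaN-propagating,
# lowest-index-on-ties argmax over dim 0. The helper name is ours, not the
# generator's.
def _fused_add_max_mean_relu_reference(pre_act):
    # pre_act: the (4, 4, 4) view of the mm output with the bias already added.
    hidden = torch.relu(pre_act)              # aten.relu
    mean_hidden = hidden.mean(dim=0)          # aten.mean.dim([0])
    max_hidden, argmax = hidden.max(dim=0)    # aten.max.dim(0)
    return argmax, mean_hidden + max_hidden   # (out_ptr0, out_ptr1)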
# kernel path: runs/run_shard_7/inductor_cache/i3/ci32bshm7vv6yycmhvqgk6df7gy4rk2dkcyol7iwwj7ttakuvnhx.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
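# --- Editor note (hedged addition): this kernel materializes the boolean mask
# that autograd's threshold_backward uses to zero upstream gradients wherever
# ReLU was inactive. A minimal eager-mode equivalent (helper name is ours):
def _relu_backward_mask_reference(pre_act):
    return torch.relu(pre_act) <= 0  # True where the gradient will be dropped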
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden, mean_hidden, max_1, add], Original ATen: [aten.relu, aten.mean, aten.max, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_mean_relu_0.run(buf0, primals_3, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf0, primals_3, buf4, 64, grid=grid(64), stream=stream0)
del buf0
del primals_3
return (buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2, primals_4, reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0), buf4, )
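# Editor note (hedged addition): reinterpret_tensor(primals_1, (16, 4), (4, 1), 0)
# above is a zero-copy stride view over just the first 64 elements of the
# (4, 4, 4, 4) input -- i.e. primals_1[0].reshape(16, 4) -- because the eager
# forward() unpacks encoder_hiddens as the first dim-0 slice; a single GEMM then
# applies the hidden layer to all 16 positions at once.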
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class WordPredictor(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim()
if self.use_self_attention:
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
logits = self.output_layer(pred_hidden)
else:
hidden = F.relu(self.hidden_layer(encoder_hiddens))
mean_hidden = torch.mean(hidden, 0)
max_hidden = torch.max(hidden, 0)[0]
logits = self.output_layer(mean_hidden + max_hidden)
return logits
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_output_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
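def _word_predictor_usage_sketch():
    # Editor note (hedged addition): mirrors get_inputs/get_init_inputs above.
    # Tuple-unpacking in forward() takes the first dim-0 slice, so the
    # (4, 4, 4, 4) input yields encoder_hiddens of shape (4, 4, 4).
    model = WordPredictor(encoder_output_dim=4, hidden_dim=4, output_dim=4)
    return model(torch.rand(4, 4, 4, 4))  # logits of shape (4, 4)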
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_mean_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp23 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp40 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 > tmp7
tmp9 = tmp4 == tmp7
tmp10 = tmp4 != tmp4
tmp11 = tmp7 != tmp7
tmp12 = tmp10 > tmp11
tmp13 = tmp8 | tmp12
tmp14 = tmp10 & tmp11
tmp15 = tmp9 | tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp16 < tmp17
tmp19 = tmp15 & tmp18
tmp20 = tmp13 | tmp19
tmp21 = tl.where(tmp20, tmp4, tmp7)
tmp22 = tl.where(tmp20, tmp16, tmp17)
tmp24 = tmp23 + tmp1
tmp25 = triton_helpers.maximum(tmp3, tmp24)
tmp26 = tmp21 > tmp25
tmp27 = tmp21 == tmp25
tmp28 = tmp21 != tmp21
tmp29 = tmp25 != tmp25
tmp30 = tmp28 > tmp29
tmp31 = tmp26 | tmp30
tmp32 = tmp28 & tmp29
tmp33 = tmp27 | tmp32
tmp34 = tl.full([1], 2, tl.int64)
tmp35 = tmp22 < tmp34
tmp36 = tmp33 & tmp35
tmp37 = tmp31 | tmp36
tmp38 = tl.where(tmp37, tmp21, tmp25)
tmp39 = tl.where(tmp37, tmp22, tmp34)
tmp41 = tmp40 + tmp1
tmp42 = triton_helpers.maximum(tmp3, tmp41)
tmp43 = tmp38 > tmp42
tmp44 = tmp38 == tmp42
tmp45 = tmp38 != tmp38
tmp46 = tmp42 != tmp42
tmp47 = tmp45 > tmp46
tmp48 = tmp43 | tmp47
tmp49 = tmp45 & tmp46
tmp50 = tmp44 | tmp49
tmp51 = tl.full([1], 3, tl.int64)
tmp52 = tmp39 < tmp51
tmp53 = tmp50 & tmp52
tmp54 = tmp48 | tmp53
tmp56 = tl.where(tmp54, tmp39, tmp51)
tmp57 = tmp4 + tmp7
tmp58 = tmp57 + tmp25
tmp59 = tmp58 + tmp42
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tmp62 = triton_helpers.maximum(tmp4, tmp7)
tmp63 = triton_helpers.maximum(tmp62, tmp25)
tmp64 = triton_helpers.maximum(tmp63, tmp42)
tmp65 = tmp61 + tmp64
tl.store(out_ptr0 + x2, tmp56, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_max_mean_relu_0[grid(16)](buf0, primals_3,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf0,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_3
return buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf2, primals_4, reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0
), buf4
class WordPredictorNew(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def forward(self, input_0):
primals_2 = self.hidden_layer.weight
primals_3 = self.hidden_layer.bias
primals_4 = self.output_layer.weight
primals_5 = self.output_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Jeffyrao/translate | WordPredictor | false | 2,445 | [
"BSD-3-Clause"
] | 0 | ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | https://github.com/Jeffyrao/translate/tree/ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim()
if self.use_self_attention:
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
logits = self.output_layer(pred_hidden)
else:
hidden = F.relu(self.hidden_layer(encoder_hiddens))
mean_hidden = torch.mean(hidden, 0)
max_hidden = torch.max(hidden, 0)[0]
logits = self.output_layer(mean_hidden + max_hidden)
return logits
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xs/cxsusoiimcsb2zqt6msuytzn4a7beumw4dwrmumxzmw43s6usbsr.py
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
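# --- Editor note (hedged addition): per-channel batch-norm statistics over
# dims [0, 2, 3], as used with track_running_stats=False. Note that out_ptr1
# holds the raw sum of squared deviations; the consumer kernels divide by
# N = 64 before adding eps. Eager-mode sketch (helper name is ours):
def _bn_stats_reference(x, eps=1e-05):
    # x: (N, C, H, W) with N * H * W == 64 in this graph.
    mean = x.mean(dim=(0, 2, 3))
    sq_dev_sum = ((x - mean.view(1, -1, 1, 1)) ** 2).sum(dim=(0, 2, 3))
    inv_std = torch.rsqrt(sq_dev_sum / (x.numel() / x.size(1)) + eps)
    return mean, sq_dev_sum, inv_std  # (out_ptr0, out_ptr1, out_ptr2)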
# kernel path: runs/run_shard_7/inductor_cache/no/cnomyydbnm2nuezfpivp5nwa25532prpfcnhu2qpqucruttgmqor.py
# Topologically Sorted Source Nodes: [batch_norm, out], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# out => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
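# --- Editor note (hedged addition): eager-mode equivalent of the fused
# normalize + affine + ReLU above, assuming the per-channel stats and affine
# parameters are already broadcastable as (1, C, 1, 1):
def _bn_relu_reference(x, mean, sq_dev_sum, gamma, beta, n=64.0, eps=1e-05):
    inv_std = torch.rsqrt(sq_dev_sum / n + eps)
    return torch.relu((x - mean) * inv_std * gamma + beta)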
# kernel path: runs/run_shard_7/inductor_cache/um/cumimxu45muhlchm3x37rigjoh4ikg4gv33wngnravn6y4gzwph4.py
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => add_2, add_3, mul_2, mul_3, rsqrt_1, sub_1, var_mean_1
# out_2 => add_4
# out_3 => relu_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_1, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %unsqueeze_5), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_7), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_2), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), xmask)
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 0.0
tmp19 = tmp17 <= tmp18
tl.store(out_ptr0 + (x3), tmp17, xmask)
tl.store(out_ptr1 + (x3), tmp19, xmask)
''', device_str='cuda')
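# --- Editor note (hedged addition): same normalization as above plus the
# residual shortcut and the saved ReLU backward mask, matching
# out = relu(bn2(conv2(...)) + shortcut(x)) in BasicBlock.forward:
def _bn_add_relu_reference(x, mean, sq_dev_sum, gamma, beta, residual,
                           n=64.0, eps=1e-05):
    inv_std = torch.rsqrt(sq_dev_sum / n + eps)
    out = torch.relu((x - mean) * inv_std * gamma + beta + residual)
    return out, out <= 0  # (out_ptr0, out_ptr1)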
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0.run(buf0, buf1, buf2, buf4, 4, 64, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm, out], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_1.run(buf0, buf1, buf2, primals_3, primals_4, buf5, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf2; del buf2 # reuse
buf8 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf10 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_0.run(buf6, buf7, buf8, buf10, 4, 64, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2.run(buf6, buf7, buf8, primals_6, primals_7, primals_2, buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf8
del primals_7
return (buf11, buf5, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf4, (4, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (4, ), (1, ), 0), buf12, reinterpret_tensor(buf7, (1, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 4, 1, 1), (4, 1, 1, 1), 0), )
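# Editor note (hedged addition): `buf7 = buf2; del buf2  # reuse` above is
# Inductor's memory planning handing the first batch norm's stats buffer to
# the second batch norm once its last consumer has run, avoiding a fresh
# allocation of the same (1, 4, 1, 1) shape.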
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
from torch.nn.functional import relu
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, track_running_stats=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, track_running_stats=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes,
track_running_stats=False))
self.act = OrderedDict()
self.count = 0
def forward(self, x):
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = x
self.count += 1
out = relu(self.bn1(self.conv1(x)))
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = out
self.count += 1
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
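def _basic_block_usage_sketch():
    # Editor note (hedged addition): with in_planes == planes and stride 1,
    # the shortcut is the empty nn.Sequential(), i.e. an identity mapping.
    block = BasicBlock(in_planes=4, planes=4)
    return block(torch.rand(4, 4, 4, 4))  # shape-preserving: (4, 4, 4, 4)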
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, xmask)
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 0.0
tmp19 = tmp17 <= tmp18
tl.store(out_ptr0 + x3, tmp17, xmask)
tl.store(out_ptr1 + x3, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf0, buf1,
buf2, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_1[grid(256)](buf0,
buf1, buf2, primals_3, primals_4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf2
del buf2
buf8 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf10 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf6, buf7,
buf8, buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__native_batch_norm_legit_add_relu_threshold_backward_2[
grid(256)](buf6, buf7, buf8, primals_6, primals_7, primals_2,
buf11, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_7
return (buf11, buf5, primals_1, primals_2, primals_3, primals_5,
primals_6, buf0, reinterpret_tensor(buf4, (4,), (1,), 0), buf5,
buf6, reinterpret_tensor(buf10, (4,), (1,), 0), buf12,
reinterpret_tensor(buf7, (1, 4, 1, 1), (4, 1, 1, 1), 0),
reinterpret_tensor(buf1, (1, 4, 1, 1), (4, 1, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, track_running_stats=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, track_running_stats=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes,
track_running_stats=False))
self.act = OrderedDict()
self.count = 0
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.bn1.weight
primals_4 = self.bn1.bias
primals_5 = self.conv2.weight
primals_6 = self.bn2.weight
primals_7 = self.bn2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| JunLi-Galios/GPM | BasicBlock | false | 2,446 | [
"MIT"
] | 0 | 9ea62c52ec5ae09de185fa66b1262e31c90d82a6 | https://github.com/JunLi-Galios/GPM/tree/9ea62c52ec5ae09de185fa66b1262e31c90d82a6 | import torch
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
from torch.nn.functional import relu
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class Model(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, track_running_stats=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, track_running_stats=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes,
track_running_stats=False))
self.act = OrderedDict()
self.count = 0
def forward(self, x):
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = x
self.count += 1
out = relu(self.bn1(self.conv1(x)))
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = out
self.count += 1
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BasicConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/z3/cz3vliqlpgih6ihwoaxl6cmnicfmv2ygutcuphilcsragp3evc57.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
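# --- Editor note (hedged addition): the extern conv runs bias-free, so this
# kernel folds the bias in, applies ReLU in place, and saves the backward
# mask. Eager-mode sketch (helper name is ours):
def _bias_relu_with_mask_reference(conv_out, bias):
    act = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return act, act <= 0  # (in_out_ptr0 after the store, out_ptr0)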
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
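def _basic_conv2d_usage_sketch():
    # Editor note (hedged addition): a 4x4 kernel over a 4x4 input with no
    # padding collapses the spatial dims to 1x1.
    m = BasicConv2d(in_channels=4, out_channels=4, kernel_size=4)
    return m(torch.rand(4, 4, 4, 4))  # -> shape (4, 4, 1, 1)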
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class BasicConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2dNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| K-ona/template | BasicConv2d | false | 2,447 | [
"Apache-2.0"
] | 0 | a9ea81695b8d7eb512ac7bd54c76f14c7dcb30c4 | https://github.com/K-ona/template/tree/a9ea81695b8d7eb512ac7bd54c76f14c7dcb30c4 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Downsample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
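# Note (added): the fused kernel above only folds the convolution bias
# back in. Inductor calls extern_kernels.convolution with bias=None, then
# adds primals_3 -- indexed per output channel through x1 = (i // 4) % 4 --
# to buf0 in place.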
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(dims, self.channels, self.out_channels, 3,
stride=stride, padding=1)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'use_conv': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class DownsampleNew(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(dims, self.channels, self.out_channels, 3,
stride=stride, padding=1)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, input_0):
primals_2 = self.op.weight
primals_3 = self.op.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| KamilDeja/guided-diffusion | Downsample | false | 2,448 | [
"MIT"
] | 0 | d0eeeb4637379a3ece40c4dd38ccdf5d8ed5e837 | https://github.com/KamilDeja/guided-diffusion/tree/d0eeeb4637379a3ece40c4dd38ccdf5d8ed5e837 | import torch
import torch.nn as nn
import torch.utils.model_zoo
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class Model(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(dims, self.channels, self.out_channels, 3,
stride=stride, padding=1)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
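# Shape sketch (an illustrative addition): with use_conv truthy the op is
# a 3x3 convolution with stride 2 and padding 1, so a 4x4 map comes out
# 2x2 -- the same (4, 4, 2, 2) shape the compiled call() asserts for buf0.
def _downsample_demo():
    down = Model(4, True)
    x = torch.rand(4, 4, 4, 4)
    y = down(x)
    assert y.shape == (4, 4, 2, 2)
    return y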
|
LossPredLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/no/cno5volk73zd2bft3tqji7bjbkwnhv7zy2scykviixz5rie5viyd.py
# Topologically Sorted Source Nodes: [clamp, sign, mul, one, mul_1, sub_3, clamp_1, loss, loss_1], Original ATen: [aten.clamp, aten.sign, aten.mul, aten.sub, aten.rsub, aten.sum, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# clamp_1 => clamp_min_1
# loss => sum_1
# loss_1 => div
# mul => mul
# mul_1 => mul_1
# one => sub_2
# sign => sign
# sub_3 => sub_3
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%slice_2, 0), kwargs = {})
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %slice_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 2), kwargs = {})
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0 = async_compile.triton('triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr0 + (192 + r0 + ((-64)*r1)), None)
tmp16 = tl.load(in_ptr1 + (r2), None)
tmp17 = tl.load(in_ptr1 + (192 + r0 + ((-64)*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 2.0
tmp13 = tmp11 * tmp12
tmp14 = 1.0
tmp15 = tmp13 - tmp14
tmp18 = tmp16 - tmp17
tmp19 = tmp15 * tmp18
tmp20 = tmp14 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
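# Eager-mode reference for the kernel above (an explanatory sketch, not
# Inductor output). The offset 192 + r0 - 64*r1 reads batch element
# 3 - r1, i.e. the flip(0) partner, and rnumel=128 covers only the first
# half of the 256-element tensors, matching the [:len // 2] slice; the
# final * 0.5 is the division by pred_loss.size(0) == 2.
def _loss_pred_reference(pred, target):
    pred = (pred - pred.flip(0))[:len(pred) // 2]
    target = (target - target.flip(0))[:len(target) // 2]
    one = 2 * torch.sign(torch.clamp(target, min=0)) - 1
    return torch.sum(torch.clamp(1.0 - one * pred, min=0)) / pred.size(0)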
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [clamp, sign, mul, one, mul_1, sub_3, clamp_1, loss, loss_1], Original ATen: [aten.clamp, aten.sign, aten.mul, aten.sub, aten.rsub, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0.run(buf1, arg1_1, arg0_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LossPredLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_loss, target_loss):
pred_loss = (pred_loss - pred_loss.flip(0))[:len(pred_loss) // 2]
target_loss = (target_loss - target_loss.flip(0))[:len(target_loss) //
2]
target_loss = target_loss.detach()
one = 2 * torch.sign(torch.clamp(target_loss, min=0)) - 1
loss = torch.sum(torch.clamp(1.0 - one * pred_loss, min=0))
loss = loss / pred_loss.size(0)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = rindex // 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr0 + (192 + r0 + -64 * r1), None)
tmp16 = tl.load(in_ptr1 + r2, None)
tmp17 = tl.load(in_ptr1 + (192 + r0 + -64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 2.0
tmp13 = tmp11 * tmp12
tmp14 = 1.0
tmp15 = tmp13 - tmp14
tmp18 = tmp16 - tmp17
tmp19 = tmp15 * tmp18
tmp20 = tmp14 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LossPredLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| KMU-AELAB/Active_Learning | LossPredLoss | false | 2,449 | [
"MIT"
] | 0 | bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | https://github.com/KMU-AELAB/Active_Learning/tree/bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_loss, target_loss):
pred_loss = (pred_loss - pred_loss.flip(0))[:len(pred_loss) // 2]
target_loss = (target_loss - target_loss.flip(0))[:len(target_loss) //
2]
target_loss = target_loss.detach()
one = 2 * torch.sign(torch.clamp(target_loss, min=0)) - 1
loss = torch.sum(torch.clamp(1.0 - one * pred_loss, min=0))
loss = loss / pred_loss.size(0)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
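# Illustrative pairing sketch (added, not from the source repository):
# subtracting the batch-flipped tensor pairs sample i with sample
# B - 1 - i, and keeping only the first half counts each pair once. The
# body is then a margin-ranking hinge: zero only when the predicted loss
# difference agrees in sign with the target difference by at least 1.
def _loss_pred_demo():
    crit = Model()
    pred, target = torch.rand(8, 1), torch.rand(8, 1)
    loss = crit(pred, target)
    assert loss.item() >= 0.0  # hinge terms are clamped at zero
    return loss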
|
CodeLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fr/cfrb645vkznmw7ooldvqafzhlfzyxdmsabwmfqp5bym34xyzoiuu.py
# Topologically Sorted Source Nodes: [origin_code, sum_1, abs_1, mean, trans_code, sum_2, abs_2, mean_1, add, code_balance_loss], Original ATen: [aten.sign, aten.sum, aten.abs, aten.mean, aten.add, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# code_balance_loss => div
# mean => mean
# mean_1 => mean_1
# origin_code => sign
# sum_1 => sum_1
# sum_2 => sum_2
# trans_code => sign_1
# Graph fragment:
# %sign : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sign, [1]), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sum_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %sign_1 : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%arg1_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sign_1, [1]), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sum_2,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
triton_per_fused_abs_add_div_mean_sign_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_mean_sign_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mean_sign_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_mean_sign_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp24 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp36 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp43 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp51 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp59 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp9 = tmp1 < tmp8
tmp10 = tmp9.to(tl.int8)
tmp11 = tmp8 < tmp1
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp10 - tmp12
tmp14 = tmp13.to(tmp8.dtype)
tmp15 = tmp7 + tmp14
tmp17 = tmp1 < tmp16
tmp18 = tmp17.to(tl.int8)
tmp19 = tmp16 < tmp1
tmp20 = tmp19.to(tl.int8)
tmp21 = tmp18 - tmp20
tmp22 = tmp21.to(tmp16.dtype)
tmp23 = tmp15 + tmp22
tmp25 = tmp1 < tmp24
tmp26 = tmp25.to(tl.int8)
tmp27 = tmp24 < tmp1
tmp28 = tmp27.to(tl.int8)
tmp29 = tmp26 - tmp28
tmp30 = tmp29.to(tmp24.dtype)
tmp31 = tmp23 + tmp30
tmp32 = tl_math.abs(tmp31)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp37 = tmp1 < tmp36
tmp38 = tmp37.to(tl.int8)
tmp39 = tmp36 < tmp1
tmp40 = tmp39.to(tl.int8)
tmp41 = tmp38 - tmp40
tmp42 = tmp41.to(tmp36.dtype)
tmp44 = tmp1 < tmp43
tmp45 = tmp44.to(tl.int8)
tmp46 = tmp43 < tmp1
tmp47 = tmp46.to(tl.int8)
tmp48 = tmp45 - tmp47
tmp49 = tmp48.to(tmp43.dtype)
tmp50 = tmp42 + tmp49
tmp52 = tmp1 < tmp51
tmp53 = tmp52.to(tl.int8)
tmp54 = tmp51 < tmp1
tmp55 = tmp54.to(tl.int8)
tmp56 = tmp53 - tmp55
tmp57 = tmp56.to(tmp51.dtype)
tmp58 = tmp50 + tmp57
tmp60 = tmp1 < tmp59
tmp61 = tmp60.to(tl.int8)
tmp62 = tmp59 < tmp1
tmp63 = tmp62.to(tl.int8)
tmp64 = tmp61 - tmp63
tmp65 = tmp64.to(tmp59.dtype)
tmp66 = tmp58 + tmp65
tmp67 = tl_math.abs(tmp66)
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp70 = tl.sum(tmp68, 1)[:, None]
tmp71 = 64.0
tmp72 = tmp35 / tmp71
tmp73 = tmp70 / tmp71
tmp74 = tmp72 + tmp73
tmp75 = 0.5
tmp76 = tmp74 * tmp75
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp76, None)
''', device_str='cuda')
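# Eager-mode reference for the fused kernel above (a sketch added for
# clarity, not emitted by Inductor): sign() each input, sum over dim 1
# (the four loads at offsets 0/16/32/48), take the absolute value, mean
# over the remaining 64 positions, then average the two means.
def _code_balance_reference(origin_logit, trans_logit):
    origin_code = torch.sign(origin_logit)
    trans_code = torch.sign(trans_logit)
    return (origin_code.sum(dim=1).abs().mean() +
            trans_code.sum(dim=1).abs().mean()) / 2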
# kernel path: runs/run_shard_7/inductor_cache/el/celuuec6lck7idayrkstn2ksyecjnv6f76kxurv62g267qgyw5pr.py
# Topologically Sorted Source Nodes: [origin_code, trans_code, code_loss], Original ATen: [aten.sign, aten.mse_loss]
# Source node to ATen node mapping:
# code_loss => mean_2, pow_1, sub
# origin_code => sign
# trans_code => sign_1
# Graph fragment:
# %sign : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {})
# %sign_1 : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sign_1, %sign), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_sign_1 = async_compile.triton('triton_per_fused_mse_loss_sign_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sign_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_sign_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp9 = tmp1 < tmp8
tmp10 = tmp9.to(tl.int8)
tmp11 = tmp8 < tmp1
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp10 - tmp12
tmp14 = tmp13.to(tmp8.dtype)
tmp15 = tmp7 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [origin_code, sum_1, abs_1, mean, trans_code, sum_2, abs_2, mean_1, add, code_balance_loss], Original ATen: [aten.sign, aten.sum, aten.abs, aten.mean, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sign_sum_0.run(buf3, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [origin_code, trans_code, code_loss], Original ATen: [aten.sign, aten.mse_loss]
triton_per_fused_mse_loss_sign_1.run(buf4, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CodeLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss()
def forward(self, origin_logit, trans_logit):
origin_code, trans_code = torch.sign(origin_logit), torch.sign(
trans_logit)
code_balance_loss = (torch.mean(torch.abs(torch.sum(origin_code,
dim=1))) + torch.mean(torch.abs(torch.sum(trans_code, dim=1)))) / 2
code_loss = self.loss(trans_code, origin_code.detach())
return code_balance_loss, code_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_sign_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp36 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp43 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp51 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp59 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp9 = tmp1 < tmp8
tmp10 = tmp9.to(tl.int8)
tmp11 = tmp8 < tmp1
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp10 - tmp12
tmp14 = tmp13.to(tmp8.dtype)
tmp15 = tmp7 + tmp14
tmp17 = tmp1 < tmp16
tmp18 = tmp17.to(tl.int8)
tmp19 = tmp16 < tmp1
tmp20 = tmp19.to(tl.int8)
tmp21 = tmp18 - tmp20
tmp22 = tmp21.to(tmp16.dtype)
tmp23 = tmp15 + tmp22
tmp25 = tmp1 < tmp24
tmp26 = tmp25.to(tl.int8)
tmp27 = tmp24 < tmp1
tmp28 = tmp27.to(tl.int8)
tmp29 = tmp26 - tmp28
tmp30 = tmp29.to(tmp24.dtype)
tmp31 = tmp23 + tmp30
tmp32 = tl_math.abs(tmp31)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp37 = tmp1 < tmp36
tmp38 = tmp37.to(tl.int8)
tmp39 = tmp36 < tmp1
tmp40 = tmp39.to(tl.int8)
tmp41 = tmp38 - tmp40
tmp42 = tmp41.to(tmp36.dtype)
tmp44 = tmp1 < tmp43
tmp45 = tmp44.to(tl.int8)
tmp46 = tmp43 < tmp1
tmp47 = tmp46.to(tl.int8)
tmp48 = tmp45 - tmp47
tmp49 = tmp48.to(tmp43.dtype)
tmp50 = tmp42 + tmp49
tmp52 = tmp1 < tmp51
tmp53 = tmp52.to(tl.int8)
tmp54 = tmp51 < tmp1
tmp55 = tmp54.to(tl.int8)
tmp56 = tmp53 - tmp55
tmp57 = tmp56.to(tmp51.dtype)
tmp58 = tmp50 + tmp57
tmp60 = tmp1 < tmp59
tmp61 = tmp60.to(tl.int8)
tmp62 = tmp59 < tmp1
tmp63 = tmp62.to(tl.int8)
tmp64 = tmp61 - tmp63
tmp65 = tmp64.to(tmp59.dtype)
tmp66 = tmp58 + tmp65
tmp67 = tl_math.abs(tmp66)
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp70 = tl.sum(tmp68, 1)[:, None]
tmp71 = 64.0
tmp72 = tmp35 / tmp71
tmp73 = tmp70 / tmp71
tmp74 = tmp72 + tmp73
tmp75 = 0.5
tmp76 = tmp74 * tmp75
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp76, None)
@triton.jit
def triton_per_fused_mse_loss_sign_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp9 = tmp1 < tmp8
tmp10 = tmp9.to(tl.int8)
tmp11 = tmp8 < tmp1
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp10 - tmp12
tmp14 = tmp13.to(tmp8.dtype)
tmp15 = tmp7 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sign_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_mse_loss_sign_1[grid(1)](buf4, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3, buf4
class CodeLossNew(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| KMU-AELAB/Active_Learning | CodeLoss | false | 2,450 | [
"MIT"
] | 0 | bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | https://github.com/KMU-AELAB/Active_Learning/tree/bc569c16b5f12b58989a8f3db59b7eb4e35cce1b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss()
def forward(self, origin_logit, trans_logit):
origin_code, trans_code = torch.sign(origin_logit), torch.sign(
trans_logit)
code_balance_loss = (torch.mean(torch.abs(torch.sum(origin_code,
dim=1))) + torch.mean(torch.abs(torch.sum(trans_code, dim=1)))) / 2
code_loss = self.loss(trans_code, origin_code.detach())
return code_balance_loss, code_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
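# Small worked example (an illustrative addition): feeding identical
# logits makes the MSE agreement term exactly zero, while the balance
# term still measures how far each channel-sum of the resulting +/-1
# codes is from zero.
def _code_loss_demo():
    crit = Model()
    logits = torch.randn(4, 4, 4, 4)
    balance, agreement = crit(logits, logits)
    assert agreement.item() == 0.0  # sign(x) == sign(x)
    return balance, agreement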
|
ConvBlockFixup | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ny/cnyftzt44marcm3rjc5ttmk3yz7tgjftwetyjwwa4mdifhwaiq5l.py
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add, aten.constant_pad_nd]
# Source node to ATen node mapping:
# add => add
# out => constant_pad_nd
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%add, [0, 0, 0, 1]), kwargs = {})
triton_poi_fused_add_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_add_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x2 = (xindex // 20)
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp2, tmp6, tmp7)
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
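# Note (added): this kernel fuses `x + self.bias1a` with the explicit
# bottom-row zero pad [0, 0, 0, 1]. Together with the (1, 0) padding
# passed to the convolution call below, it reproduces the asymmetric
# padding that padding='same' needs for the even filter width of 4.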
# kernel path: runs/run_shard_7/inductor_cache/2g/c2geersai56ajl5mszxuccvf32ingsf7uewltdfucegi7nprodd2.py
# Topologically Sorted Source Nodes: [add_1, out_1, add_2, out_2], Original ATen: [aten.add, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# out_1 => relu
# out_2 => constant_pad_nd_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%add_2, [0, 0, 0, 1]), kwargs = {})
triton_poi_fused_add_constant_pad_nd_relu_1 = async_compile.triton('triton_poi_fused_add_constant_pad_nd_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_constant_pad_nd_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x2 = (xindex // 20)
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp9 = tl.load(in_ptr2 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp11 = tmp8 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ru/cruzz4bw56op4a2umcojvqo7x3yllo524t6ildzprzx27ua6kcm5.py
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# mul => mul
# out_3 => add_3
# out_4 => add_4
# out_5 => relu_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_mul_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_mul_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x0), xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cu/ccuuixkigxbxyhnijuiyewzzytgjirr5emo3575tqzbab4hum44n.py
# Topologically Sorted Source Nodes: [add_1, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add_1 => add_1
# out_1 => relu
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
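    # Likely parameter mapping, inferred from the shape asserts below
    # (an annotation added for readability): primals_1 = x,
    # primals_2 = bias1a, primals_3 = conv1.weight, primals_4 = bias1b,
    # primals_5 = bias2a, primals_6 = conv2.weight, primals_7 = scale,
    # primals_8 = bias2b.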
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add, aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_add_constant_pad_nd_0.run(primals_1, primals_2, buf0, 320, grid=grid(320), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, out_1, add_2, out_2], Original ATen: [aten.add, aten.relu, aten.constant_pad_nd]
triton_poi_fused_add_constant_pad_nd_relu_1.run(buf1, primals_4, primals_5, buf2, 320, grid=grid(320), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_mul_relu_threshold_backward_2.run(buf3, primals_7, primals_8, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_1, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_3.run(buf1, primals_4, buf6, 256, grid=grid(256), stream=stream0)
del buf1
del primals_4
return (buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ConvBlockFixup(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super(ConvBlockFixup, self).__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters,
            (self.filter_width, 1), dilation=(self.dilation, 1),
            bias=False, padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters,
            (self.filter_width, 1), dilation=(self.dilation, 1),
            bias=False, padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'filter_width': 4, 'input_filters': 4, 'nb_filters': 4,
'dilation': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_constant_pad_nd_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp2, tmp6, tmp7)
tl.store(out_ptr0 + x4, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp9 = tl.load(in_ptr2 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp11 = tmp8 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_constant_pad_nd_0[grid(320)](primals_1,
primals_2, buf0, 320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_add_constant_pad_nd_relu_1[grid(320)](buf1,
primals_4, primals_5, buf2, 320, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_3[grid(256)](buf1,
primals_4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_4
return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6
class ConvBlockFixupNew(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super(ConvBlockFixupNew, self).__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_2 = self.bias1a
primals_4 = self.bias1b
primals_5 = self.bias2a
primals_7 = self.scale
primals_8 = self.bias2b
primals_3 = self.conv1.weight
primals_6 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781 | ConvBlockFixup | false | 2,451 | [
"MIT"
] | 0 | f4fa436000a46df80ec083c8e3692cd21787e5b3 | https://github.com/KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781/tree/f4fa436000a46df80ec083c8e3692cd21787e5b3 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super().__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'filter_width': 4, 'input_filters': 4, 'nb_filters': 4,
'dilation': 1}]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_2 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super(Actor, self).__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_obs': 4, 'output_dim': 4, 'hidden_size': 4}]
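# Minimal usage sketch (illustrative only, not part of the original repo): it
# reuses the toy sizes from get_init_inputs; any input whose last dimension
# equals n_obs would work the same way.
def _example_actor_usage():
    actor = Actor(n_obs=4, output_dim=4, hidden_size=4)
    action = actor(torch.rand(4, 4, 4, 4))
    # tanh squashes the output, so every component of `action` lies in (-1, 1)
    return action.shape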
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
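# Fused bias-add + ReLU; the kernel also stores the (activation <= 0) mask
# needed by the ReLU backward pass (hence "threshold_backward" in its name).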
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
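# Fused bias-add + tanh for the actor's final linear layer.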
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super(ActorNew, self).__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| KhalilWong/Learn-RL | Actor | false | 2,452 | [
"MIT"
] | 0 | 9f63c5adafab1413362366d28d8711096ce6648c | https://github.com/KhalilWong/Learn-RL/tree/9f63c5adafab1413362366d28d8711096ce6648c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super().__init__()
self.linear1 = nn.Linear(n_obs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_dim)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# h1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wc/cwccwpcx37typ43npqql5ch6jg26xdsj4ic4s37clsyqn7fk4mdk.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# std => exp
# z => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%addmm_1, %mul_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hb/chbjjrtszu6f3bhry7ireqcm3ie3twpz5s7g7owb3zuauqhiqcby.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400, ), (1, ))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20, ), (1, ))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400, ), (1, ))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf4 = torch.ops.aten.randn.default([4, 20], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
triton_poi_fused_add_exp_mul_1.run(buf2, buf5, buf3, buf6, 80, grid=grid(80), stream=stream0)
buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf7)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf8, primals_9, 1600, grid=grid(1600), stream=stream0)
del primals_9
buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf10, primals_11, 3136, grid=grid(3136), stream=stream0)
del primals_11
return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((400, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((784, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.onnx
import torch.optim
import torch.utils.data.distributed
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
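        # z = mu + eps * sigma is a sample from N(mu, sigma^2) that remains
        # differentiable w.r.t. mu and logvar (the reparameterization trick).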
return mu + eps * std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
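# Minimal usage sketch (illustrative only, not taken from the repo): runs one
# forward pass and evaluates the usual VAE objective; the BCE + KL combination
# below is the standard ELBO loss, which this file itself does not define.
def _example_vae_step():
    model = VAE()
    x = torch.rand(4, 784)
    recon, mu, logvar = model(x)
    bce = F.binary_cross_entropy(recon, x, reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld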
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.onnx
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
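# Fused reparameterization: out = mu + eps * exp(0.5 * logvar), the same
# computation as VAE.reparameterize collapsed into one elementwise kernel.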
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
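# Fused bias-add + sigmoid producing the 784-dim decoder reconstruction.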
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.randn.default([4, 20], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(80)](buf2, buf5, buf3, buf6, 80,
XBLOCK=128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1,
20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_relu_0[grid(1600)](buf8, primals_9, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784),
(1, 400), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8,
buf10, primals_10, primals_8, primals_6, primals_4)
class VAENew(nn.Module):
def __init__(self):
super(VAENew, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_6 = self.fc22.weight
primals_7 = self.fc22.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_10 = self.fc4.weight
primals_11 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
| Kabongosalomon/examples | VAE | false | 2,453 | [
"BSD-3-Clause"
] | 0 | c4bdf77ca3687c4a43ae3f50f78f63f041f1a0c8 | https://github.com/Kabongosalomon/examples/tree/c4bdf77ca3687c4a43ae3f50f78f63f041f1a0c8 | import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.onnx
import torch.optim
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
LearnedPositionalEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5i/c5iybmnijeaxq3pumkl5crtkns462pwdrh72bxy4lcvnlh3r4364.py
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# mask => convert_element_type
# ne => ne
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%convert_element_type, 1), kwargs = {})
triton_per_fused__to_copy_cumsum_ne_0 = async_compile.triton('triton_per_fused__to_copy_cumsum_ne_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_cumsum_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ft/cftxaavy7b7scxgnrhfsvfnicvimxnf3kpckow5nzkyed3meyoli.py
# Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
# Source node to ATen node mapping:
# incremental_indices => mul
# long => convert_element_type_2
# mask => convert_element_type
# ne => ne
# positions => add
# type_as => convert_element_type_1
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cumsum, torch.int32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul, torch.int64), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 4), kwargs = {})
triton_poi_fused__to_copy_add_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_add_mul_ne_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_mul_ne_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/in/cinglqnf6mtochspmiolvr3bqay6yiivzgqlihpkdlbd5p4ccw54.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add, 4), kwargs = {})
triton_poi_fused_embedding_2 = async_compile.triton('triton_poi_fused_embedding_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 9)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 9")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0.run(primals_1, buf0, 64, 4, grid=grid(64), stream=stream0)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
triton_poi_fused__to_copy_add_mul_ne_1.run(buf1, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_2.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf2, buf1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((9, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
    :param torch.Tensor input_ids: tensor of token ids
    :param int padding_idx: id of the padding symbol
    :return torch.Tensor: position ids with the same shape as `input_ids`
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
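# Worked example (editor's sketch, not from the original repo; padding_idx=1
# is an assumed toy value): padding positions keep the value padding_idx,
# while real tokens count up from padding_idx + 1.
def _example_position_ids():
    ids = torch.tensor([[5, 6, 1, 1]])
    # mask = [1, 1, 0, 0]; cumsum * mask = [1, 2, 0, 0]; + padding_idx = [2, 3, 1, 1]
    return create_position_ids_from_input_ids(ids, padding_idx=1)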
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
assert padding_idx is not None
num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim,
            padding_idx=padding_idx)
def forward(self, input, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
if use_cache:
pos = int(self.padding_idx + input.size(1))
positions = input.data.new(1, 1).fill_(pos)
else:
            positions = create_position_ids_from_input_ids(input,
                self.padding_idx)
return super().forward(positions), positions
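# Minimal usage sketch (editor's addition; the toy sizes are assumptions):
# the constructor above grows num_embeddings by padding_idx + 1 so that every
# position id produced for the batch indexes a valid row of the table.
def _example_learned_positional_embedding():
    emb = LearnedPositionalEmbedding(num_embeddings=4, embedding_dim=4,
        padding_idx=1)  # weight ends up with 4 + 1 + 1 = 6 rows
    embeds, positions = emb(torch.tensor([[5, 6, 1, 1]]))
    return embeds.shape, positions  # torch.Size([1, 4, 4]), tensor([[2, 3, 1, 1]])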
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask,
'index out of bounds: 0 <= tmp4 < 9')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = buf0
del buf0
triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf1, buf1
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
    :param torch.Tensor input_ids: tensor of token ids
    :param int padding_idx: id of the padding symbol
    :return torch.Tensor: position ids with the same shape as `input_ids`
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
class LearnedPositionalEmbeddingNew(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
assert padding_idx is not None
num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim,
            padding_idx=padding_idx)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| JuruoMP/Text2SQL-Multiturn | LearnedPositionalEmbedding | false | 2,454 | [
"Apache-2.0"
] | 0 | 1c7d1a93d638650a63959327a07c804d1d013e0e | https://github.com/JuruoMP/Text2SQL-Multiturn/tree/1c7d1a93d638650a63959327a07c804d1d013e0e | import torch
import torch.nn as nn
import torch.utils.data
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
    :param torch.Tensor input_ids: tensor of token ids
    :param int padding_idx: id of the padding symbol
    :return torch.Tensor: position ids with the same shape as `input_ids`
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
class Model(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
assert padding_idx is not None
num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim,
            padding_idx=padding_idx)
def forward(self, input, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
if use_cache:
pos = int(self.padding_idx + input.size(1))
positions = input.data.new(1, 1).fill_(pos)
else:
            positions = create_position_ids_from_input_ids(input,
                self.padding_idx)
return super().forward(positions), positions
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CMDS_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/if/cifzfs6xjtoe3hyibakxibge2wf4dwkajxdfqdom3v4wsid4im3f.py
# Topologically Sorted Source Nodes: [mean, m], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# m => sub
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xy/cxyejw3v2tom45hbokfxiuszvwckow3ssrglcihjalzfzebxxrge.py
# Topologically Sorted Source Nodes: [XTX, YTY, sub_2, norm, cmds], Original ATen: [aten.mul, aten.sub, aten.linalg_vector_norm, aten.pow]
# Source node to ATen node mapping:
# XTX => mul
# YTY => mul_1
# cmds => pow_3
# norm => pow_1, pow_2, sum_1
# sub_2 => sub_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.25), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 0.25), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
triton_per_fused_linalg_vector_norm_mul_pow_sub_1 = async_compile.triton('triton_per_fused_linalg_vector_norm_mul_pow_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mul_pow_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mul_pow_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp10 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [mean, m], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), out=buf1)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean_1, m_1], Original ATen: [aten.mean, aten.sub]
triton_poi_fused_mean_sub_0.run(arg1_1, buf2, 16, grid=grid(16), stream=stream0)
del arg1_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), out=buf3)
del buf2
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [XTX, YTY, sub_2, norm, cmds], Original ATen: [aten.mul, aten.sub, aten.linalg_vector_norm, aten.pow]
triton_per_fused_linalg_vector_norm_mul_pow_sub_1.run(buf5, buf1, buf3, 1, 16, grid=grid(1), stream=stream0)
del buf1
del buf3
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from sklearn.preprocessing import scale as scale
def Covariance(m, bias=False, rowvar=True, inplace=False):
""" Estimate a covariance matrix given data(tensor).
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: bool - If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
"""
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
if inplace:
m -= torch.mean(m, dim=1, keepdim=True)
else:
m = m - torch.mean(m, dim=1, keepdim=True)
mt = m.t()
return fact * m.matmul(mt).squeeze()
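# Sanity sketch (editor's addition): with rows as variables the result should
# agree with numpy.cov under the same bias convention; values are illustrative.
def _example_covariance():
    m = torch.tensor([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])
    # centered rows: [-1, 0, 1] and [-2, 0, 2]; fact = 1 / (3 - 1)
    return Covariance(m)  # tensor([[1., 2.], [2., 4.]])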
class CMDS_Loss(nn.Module):
"""Equation(1) in Self-calibrating Neural Networks for Dimensionality Reduction
Attributes:
X: tensor - original datas.
Y: tensor - encoded datas.
Returns:
cmds: float - The cmds loss.
"""
def __init__(self):
super(CMDS_Loss, self).__init__()
def forward(self, y, x):
XTX = Covariance(x.T, bias=True)
YTY = Covariance(y.T, bias=True)
cmds = torch.norm(XTX - YTY) ** 2
return cmds
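# Worked form (editor's addition): the loss above is ||C_x - C_y||_F ** 2,
# where C_x and C_y are the biased feature covariance matrices of x and y.
# Passing identical tensors therefore yields zero:
def _example_cmds_loss():
    x = torch.rand(4, 4)
    return CMDS_Loss()(x, x)  # tensor(0.)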
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from sklearn.preprocessing import scale as scale
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_per_fused_linalg_vector_norm_mul_pow_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp10 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
out=buf1)
buf2 = buf0
del buf0
triton_poi_fused_mean_sub_0[grid(16)](arg1_1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(buf2, (4, 4), (4, 1), 0),
out=buf3)
del buf2
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_linalg_vector_norm_mul_pow_sub_1[grid(1)](buf5,
buf1, buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
del buf3
return buf5,
def Covariance(m, bias=False, rowvar=True, inplace=False):
""" Estimate a covariance matrix given data(tensor).
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: bool - If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
"""
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
if inplace:
m -= torch.mean(m, dim=1, keepdim=True)
else:
m = m - torch.mean(m, dim=1, keepdim=True)
mt = m.t()
return fact * m.matmul(mt).squeeze()
class CMDS_LossNew(nn.Module):
"""Equation(1) in Self-calibrating Neural Networks for Dimensionality Reduction
Attributes:
X: tensor - original datas.
Y: tensor - encoded datas.
Returns:
cmds: float - The cmds loss.
"""
def __init__(self):
super(CMDS_LossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| CyprienGille/Supervised-Autoencoder | CMDS_Loss | false | 2,455 | [
"MIT"
] | 0 | fc8a3002d5b06319750601be586c7ca160f2189e | https://github.com/CyprienGille/Supervised-Autoencoder/tree/fc8a3002d5b06319750601be586c7ca160f2189e | import torch
from torch import nn
from sklearn.preprocessing import scale as scale
def Covariance(m, bias=False, rowvar=True, inplace=False):
""" Estimate a covariance matrix given data(tensor).
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: bool - If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
"""
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
if inplace:
m -= torch.mean(m, dim=1, keepdim=True)
else:
m = m - torch.mean(m, dim=1, keepdim=True)
mt = m.t()
return fact * m.matmul(mt).squeeze()
class Model(nn.Module):
"""Equation(1) in Self-calibrating Neural Networks for Dimensionality Reduction
Attributes:
X: tensor - original datas.
Y: tensor - encoded datas.
Returns:
cmds: float - The cmds loss.
"""
def __init__(self):
super().__init__()
def forward(self, y, x):
XTX = Covariance(x.T, bias=True)
YTY = Covariance(y.T, bias=True)
cmds = torch.norm(XTX - YTY) ** 2
return cmds
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
PreNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 16384, grid=grid(16384), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.native_dropout]
buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
del buf1
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf5 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf6, primals_5, buf10, 8192, grid=grid(8192), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.native_dropout]
buf7 = torch.ops.aten.native_dropout.default(buf6, 0.5, True)
del buf6
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf9, buf10, primals_4, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class PreNet(nn.Module):
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
super().__init__()
self.fc1 = nn.Linear(in_dims, fc1_dims)
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
self.p = dropout
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=True)
x = self.fc2(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=True)
return x
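# Behavioural note (editor's addition): dropout runs with training=True, so it
# stays stochastic even under model.eval() — the Tacotron-style prenet trick.
# Two eval-mode passes on the same input therefore almost surely differ:
def _example_prenet_eval_dropout():
    net = PreNet(in_dims=4)
    net.eval()
    x = torch.rand(1, 4)
    return torch.allclose(net(x), net(x))  # expected: False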
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dims': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf11, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
del buf1
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf6,
primals_5, buf10, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = torch.ops.aten.native_dropout.default(buf6, 0.5, True)
del buf6
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf9, buf10, primals_4, buf11
class PreNetNew(nn.Module):
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
super().__init__()
self.fc1 = nn.Linear(in_dims, fc1_dims)
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
self.p = dropout
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| KonstantinPakulev/OSM-one-shot-multispeaker | PreNet | false | 2,456 | [
"MIT"
] | 0 | 5cee1b6cb7dc7a3b4b24171340855a42824925f7 | https://github.com/KonstantinPakulev/OSM-one-shot-multispeaker/tree/5cee1b6cb7dc7a3b4b24171340855a42824925f7 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
super().__init__()
self.fc1 = nn.Linear(in_dims, fc1_dims)
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
self.p = dropout
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=True)
x = self.fc2(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
HighwayNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mx/cmxbsuduhrb3re3kyyjzil6fiub7wk6y3jw7tjv4rnhxateuwfne.py
# Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# g => sigmoid
# mul => mul
# mul_1 => mul_1
# relu => relu
# sub => sub
# y => add
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf1, buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1.0 - g) * x
return y
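# Gate sketch (editor's addition): y = g * relu(W1 x) + (1 - g) * x with
# g = sigmoid(W2 x). Forcing the gate shut (the -20 bias is an assumed value)
# shows the layer collapsing to the identity:
def _example_highway_gate():
    hw = HighwayNetwork(size=4)
    with torch.no_grad():
        hw.W2.bias.fill_(-20.0)  # g = sigmoid(x2) ~= 0 everywhere
    x = torch.rand(2, 4)
    return torch.allclose(hw(x), x, atol=1e-4)  # expected: True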
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNetworkNew(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, input_0):
primals_1 = self.W1.weight
primals_2 = self.W1.bias
primals_4 = self.W2.weight
primals_5 = self.W2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| KonstantinPakulev/OSM-one-shot-multispeaker | HighwayNetwork | false | 2,457 | [
"MIT"
] | 0 | 5cee1b6cb7dc7a3b4b24171340855a42824925f7 | https://github.com/KonstantinPakulev/OSM-one-shot-multispeaker/tree/5cee1b6cb7dc7a3b4b24171340855a42824925f7 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1.0 - g) * x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
T5LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sk/csk6e2ijxwozqfg5yafv56tovxgvs23ahvpatwnswiausxio766y.py
# Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, hidden_states, mul_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# hidden_states => mul
# mul_1 => mul_1
# pow_1 => pow_1
# rsqrt => rsqrt
# variance => mean
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {})
triton_poi_fused_add_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp1 * tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, hidden_states, mul_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_pow_rsqrt_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
        Construct a layernorm module in the T5 style. No bias and no subtraction of the mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.
variance_epsilon)
        if self.weight.dtype == torch.float16:
            hidden_states = hidden_states.to(torch.float16)
return self.weight * hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
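    # Fused T5 RMSNorm over the last dim (size 4):
    # out = weight * x * rsqrt(mean(x * x, dim=-1) + 1e-06)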
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp1 * tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class T5LayerNormNew(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
        Construct a layernorm module in the T5 style. No bias and no subtraction of the mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
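# A minimal sanity-check sketch (not part of the generated module; the helper
# name is ours): it assumes a CUDA device and compares the fused kernel
# against the eager T5 RMSNorm math.
def _check_t5_layernorm_equivalence():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mod = T5LayerNormNew(4).cuda()
    fused = mod(x)
    # eager reference: weight * x * rsqrt(mean(x^2, dim=-1) + eps)
    variance = x.pow(2).mean(-1, keepdim=True)
    ref = mod.weight * (x * torch.rsqrt(variance + mod.variance_epsilon))
    assert torch.allclose(fused, ref, atol=1e-06)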
| Hzfinfdu/Black-Box-Tuning | T5LayerNorm | false | 2,458 | [
"MIT"
] | 0 | 64eb5505875dc1b242c6f0a2a2f07e4000c24cb4 | https://github.com/Hzfinfdu/Black-Box-Tuning/tree/64eb5505875dc1b242c6f0a2a2f07e4000c24cb4 | import torch
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Construct a layernorm module in the T5 style No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.
variance_epsilon)
        if self.weight.dtype == torch.float16:
            hidden_states = hidden_states.to(torch.float16)
return self.weight * hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DAE_Module | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/o3/co3q6tbx7gekgbm25hsgcmgft6mw5snxx3t5xeskm4ht2tq446a7.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qr/cqrggygrravhrrgo5n766daqgy3wnjqbhtx6x6twnpkpjaaqfmua.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 2], [1, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kg/ckg3odfjcidiugyz6p6ovpedqg2v7xks75lsvjtoqflrvzicmqra.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%squeeze, %primals_4, %primals_5, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 32) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mg/cmg3mglvfrxhbkvyfvuncdih5w4z7ymoaq47spaetmonadbnglqy.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_3
# Graph fragment:
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ru/crutmejzeeoz2zfh3rjlxecxytzgxirqk7yzn5pkj6o6r5mv43ha.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_6 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8000,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.002), kwargs = {})
# %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.002
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
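# Note: the 0.002 scale above is input_len / output_len = 16 / 8000 for the
# nearest-neighbor Upsample(8000) applied to the length-16 encoder output,
# i.e. src_index = floor(dst_index * 16 / 8000). The analogous kernel below
# uses 0.5 = 8000 / 16000 for Upsample(16000).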
# kernel path: runs/run_shard_7/inductor_cache/4k/c4kwur5l5t5hg6j4jm6pfiwuu3mbs2p33vluckakskes6mve6ww7.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_6 => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%squeeze_2, [None, None, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_5 = async_compile.triton('triton_poi_fused__unsafe_index_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8000
x1 = (xindex // 8000)
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.002
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + ((2*tmp4) + (32*x1)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (2*tmp4) + (32*x1)), None, eviction_policy='evict_last')
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tl.store(out_ptr0 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/un/cunlycocjjooniyuc7pfvcukfmhvgjnj4bam7m2x5bbsmdpielmb.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_9 => add_2, add_3, convert_element_type_2, convert_element_type_3, iota_1, mul_2, mul_3
# Graph fragment:
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16000,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_2, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %convert_element_type_3 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_3, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_6 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/js/cjsiuuciggswr2crlysufqqop6of4yvicahgtk7zt2lkjuapwchk.py
# Topologically Sorted Source Nodes: [x_7, x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# x_7 => convolution_2
# x_8 => relu_2
# x_9 => _unsafe_index_1
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_6, %primals_7, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %convert_element_type_3]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_7 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16000
x3 = (xindex // 16000)
x1 = (xindex // 16000) % 64
x4 = xindex
tmp6 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + (8000*x3)), None, eviction_policy='evict_last')
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tl.store(out_ptr0 + (x4), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fb/cfbbywt566gtwtx4kyhulj7stlfuosz7onbhvizfzztbvhdfrki4.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# x_10 => convolution_3
# x_11 => tanh
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_8, %primals_9, [1], [1], [1], False, [0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_tanh_8 = async_compile.triton('triton_poi_fused_convolution_tanh_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5r/c5re33oaw4kvrxa2ekjnr2rbjfm56nw6dvq4r5n6v22vbrfxttwa.py
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_7 => convolution_2
# x_8 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_6, %primals_7, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 8000) % 64
x0 = xindex % 8000
x4 = (xindex // 8000)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (8064*x4)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3), (3, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1))
assert_size_stride(primals_4, (128, 64, 3), (192, 3, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (64, 128, 3), (384, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (1, 64, 3), (192, 3, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1))
buf1 = buf0; del buf0 # reuse
buf16 = empty_strided_cuda((4, 64, 64), (4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf16, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 64, 1, 32), (2048, 32, 32, 1), torch.int8)
buf3 = empty_strided_cuda((4, 64, 1, 32), (2048, 32, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 64, 32), (2048, 32, 1), 0), primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 32), (4096, 32, 1))
buf5 = buf4; del buf4 # reuse
buf15 = empty_strided_cuda((4, 128, 32), (4096, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_5, buf15, 16384, grid=grid(16384), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 128, 1, 16), (2048, 16, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, 8192, grid=grid(8192), stream=stream0)
buf7 = empty_strided_cuda((8000, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_4.run(buf7, 8000, grid=grid(8000), stream=stream0)
buf8 = empty_strided_cuda((4, 128, 8000), (1024000, 8000, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_5.run(buf5, buf8, 4096000, grid=grid(4096000), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 8000), (512000, 8000, 1))
buf10 = empty_strided_cuda((16000, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_6.run(buf10, 16000, grid=grid(16000), stream=stream0)
buf11 = empty_strided_cuda((4, 64, 16000), (1024000, 16000, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_7.run(buf9, primals_7, buf11, 4096000, grid=grid(4096000), stream=stream0)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 16000), (16000, 16000, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_8.run(buf13, primals_9, 64000, grid=grid(64000), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 64, 8000), (516096, 8064, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf9, primals_7, buf14, 2048000, grid=grid(2048000), stream=stream0)
del buf9
del primals_7
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, reinterpret_tensor(buf1, (4, 64, 1, 64), (4096, 64, 64, 1), 0), buf2, reinterpret_tensor(buf3, (4, 64, 32), (2048, 32, 1), 0), reinterpret_tensor(buf5, (4, 128, 1, 32), (4096, 32, 32, 1), 0), buf6, buf7, buf8, buf10, buf11, buf13, buf14, buf15, buf16, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 3), (192, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 128, 3), (384, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 64, 3), (192, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = torch.nn.Conv1d(1, 64, 3, padding=1)
self.maxp1 = torch.nn.MaxPool1d(2, padding=0)
self.conv2 = torch.nn.Conv1d(64, 128, 3, padding=1)
self.maxp2 = torch.nn.MaxPool1d(2, padding=0)
def forward(self, x):
x = self.conv1(x)
x = torch.relu(x)
x = self.maxp1(x)
x = self.conv2(x)
x = torch.relu(x)
x = self.maxp2(x)
return x
class Decoder(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(Decoder, self).__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, x):
x = self.upsa1(x)
x = self.conv3(x)
x = torch.relu(x)
x = self.upsa2(x)
x = self.conv4(x)
x = torch.tanh(x)
return x
class DAE_Module(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(DAE_Module, self).__init__()
self.sampling_rate = int(sampling_rate)
self.encoder = Encoder()
self.decoder = Decoder(sampling_rate=self.sampling_rate)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 32 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
    triton_helpers.maximum(tmp1, tmp0)  # result unused: this variant only stores the argmax indices (tmp5)
tl.store(out_ptr0 + x0, tmp5, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 8000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.002
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_5(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8000
x1 = xindex // 8000
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.002
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (2 * tmp4 + 32 * x1), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 2 * tmp4 + 32 * x1), None,
eviction_policy='evict_last')
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tl.store(out_ptr0 + x2, tmp7, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16000
x3 = xindex // 16000
x1 = xindex // 16000 % 64
x4 = xindex
tmp6 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + 8000 * x3), None, eviction_policy=
'evict_last')
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tl.store(out_ptr0 + x4, tmp9, None)
@triton.jit
def triton_poi_fused_convolution_tanh_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 8000 % 64
x0 = xindex % 8000
x4 = xindex // 8000
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
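    # note the padded row stride (8064 rather than 8000); it matches the
    # layout of the buf14 mask buffer allocated in call()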
tl.store(out_ptr0 + (x0 + 8064 * x4), tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3), (3, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1))
assert_size_stride(primals_4, (128, 64, 3), (192, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128, 3), (384, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (1, 64, 3), (192, 3, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1))
buf1 = buf0
del buf0
buf16 = empty_strided_cuda((4, 64, 64), (4096, 64, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16384)](
buf1, primals_2, buf16, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 1, 32), (2048, 32, 32, 1), torch.int8
)
buf3 = empty_strided_cuda((4, 64, 1, 32), (2048, 32, 32, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(8192)](buf1, buf2,
buf3, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 64,
32), (2048, 32, 1), 0), primals_4, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf4, (4, 128, 32), (4096, 32, 1))
buf5 = buf4
del buf4
buf15 = empty_strided_cuda((4, 128, 32), (4096, 32, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(16384)](
buf5, primals_5, buf15, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 128, 1, 16), (2048, 16, 16, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(8192)](buf5, buf6,
8192, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((8000,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(8000)](buf7, 8000,
XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 128, 8000), (1024000, 8000, 1), torch
.float32)
triton_poi_fused__unsafe_index_5[grid(4096000)](buf5, buf8, 4096000,
XBLOCK=1024, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 8000), (512000, 8000, 1))
buf10 = empty_strided_cuda((16000,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_6[grid(16000)](buf10,
16000, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 64, 16000), (1024000, 16000, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_relu_7[grid(4096000)](buf9,
primals_7, buf11, 4096000, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 16000), (16000, 16000, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_tanh_8[grid(64000)](buf13, primals_9,
64000, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 64, 8000), (516096, 8064, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(2048000)](
buf9, primals_7, buf14, 2048000, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf9
del primals_7
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
reinterpret_tensor(buf1, (4, 64, 1, 64), (4096, 64, 64, 1), 0),
buf2, reinterpret_tensor(buf3, (4, 64, 32), (2048, 32, 1), 0),
reinterpret_tensor(buf5, (4, 128, 1, 32), (4096, 32, 32, 1), 0),
buf6, buf7, buf8, buf10, buf11, buf13, buf14, buf15, buf16)
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = torch.nn.Conv1d(1, 64, 3, padding=1)
self.maxp1 = torch.nn.MaxPool1d(2, padding=0)
self.conv2 = torch.nn.Conv1d(64, 128, 3, padding=1)
self.maxp2 = torch.nn.MaxPool1d(2, padding=0)
def forward(self, x):
x = self.conv1(x)
x = torch.relu(x)
x = self.maxp1(x)
x = self.conv2(x)
x = torch.relu(x)
x = self.maxp2(x)
return x
class Decoder(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(Decoder, self).__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, x):
x = self.upsa1(x)
x = self.conv3(x)
x = torch.relu(x)
x = self.upsa2(x)
x = self.conv4(x)
x = torch.tanh(x)
return x
class DAE_ModuleNew(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(DAE_ModuleNew, self).__init__()
self.sampling_rate = int(sampling_rate)
self.encoder = Encoder()
self.decoder = Decoder(sampling_rate=self.sampling_rate)
def forward(self, input_0):
primals_1 = self.encoder.conv1.weight
primals_2 = self.encoder.conv1.bias
primals_4 = self.encoder.conv2.weight
primals_5 = self.encoder.conv2.bias
primals_6 = self.decoder.conv3.weight
primals_7 = self.decoder.conv3.bias
primals_8 = self.decoder.conv4.weight
primals_9 = self.decoder.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
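# A minimal usage sketch (not part of the generated module; the helper name is
# ours): it assumes a CUDA device. The autoencoder maps a length-64 mono
# signal to a length-16000 reconstruction squashed into [-1, 1] by the tanh.
def _check_dae_shapes():
    net = DAE_ModuleNew().cuda()
    y = net(torch.rand(4, 1, 64, device='cuda'))
    assert y.shape == (4, 1, 16000)
    assert y.abs().max().item() <= 1.0  # tanh keeps outputs in [-1, 1]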
| Koukyosyumei/Zatsuon | DAE_Module | false | 2,459 | [
"Apache-2.0"
] | 0 | d7f520a282cf00bfd19d2dec300701c21403cba1 | https://github.com/Koukyosyumei/Zatsuon/tree/d7f520a282cf00bfd19d2dec300701c21403cba1 | import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv1d(1, 64, 3, padding=1)
self.maxp1 = torch.nn.MaxPool1d(2, padding=0)
self.conv2 = torch.nn.Conv1d(64, 128, 3, padding=1)
self.maxp2 = torch.nn.MaxPool1d(2, padding=0)
def forward(self, x):
x = self.conv1(x)
x = torch.relu(x)
x = self.maxp1(x)
x = self.conv2(x)
x = torch.relu(x)
x = self.maxp2(x)
return x
class Decoder(nn.Module):
def __init__(self, sampling_rate=16000.0):
super().__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, x):
x = self.upsa1(x)
x = self.conv3(x)
x = torch.relu(x)
x = self.upsa2(x)
x = self.conv4(x)
x = torch.tanh(x)
return x
class Model(nn.Module):
def __init__(self, sampling_rate=16000.0):
super().__init__()
self.sampling_rate = int(sampling_rate)
self.encoder = Encoder()
self.decoder = Decoder(sampling_rate=self.sampling_rate)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gd/cgdwtqj75vevsjul4s5vtz2qhpuaoijtngm3emdfrduonysp66dd.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 46464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1936) % 6
x0 = xindex % 1936
x4 = (xindex // 1936)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (1952*x4)), tmp4, xmask)
''', device_str='cuda')
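
# A hedged eager-mode reference for the fused bias-add + ReLU above (the
# helper below is illustrative and not part of the generated module; the
# real kernel additionally writes into a padded stride-1952 layout for the
# max-pool that follows).
def _bias_relu_reference(conv_out, bias):
    # conv_out: (N, C, H, W) float tensor, bias: (C,) float tensor
    return (conv_out + bias[None, :, None, None]).clamp_min(0)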
# kernel path: runs/run_shard_7/inductor_cache/tw/ctwnmw6uiqezscye2wforl2bbn4e7j4dgyyg2m6m7umk7ofahlck.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 11616
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 22
x1 = (xindex // 22) % 22
x4 = (xindex // 484)
x3 = (xindex // 2904)
x5 = xindex % 2904
tmp0 = tl.load(in_ptr0 + ((2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (44 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (45 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (2912*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (2944*x3)), tmp16, xmask)
''', device_str='cuda')
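
# Illustrative note (the helper name below is ours): the int8 output stores
# which of the four positions in each 2x2 pooling window won the max,
# encoded 0..3 in row-major order, as produced by
# prims._low_memory_max_pool2d_with_offsets. An offset o for output window
# (h, w) decodes to an input coordinate as:
def _decode_pool_offset(h, w, o):
    return 2 * h + o // 2, 2 * w + o % 2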
# kernel path: runs/run_shard_7/inductor_cache/rf/crfvz4wksxjq2lxqcmnb3eitiw46xllwcvvnmt4ngy4vllwfhlxe.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20736
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 324) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qx/cqxeczvdupkrgyayhmn3d5wexzpvhss2y6sfd4m3stwt2uvhhd5o.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x3 = (xindex // 9)
x2 = (xindex // 1296)
x4 = xindex % 1296
tmp0 = tl.load(in_ptr0 + ((2*x0) + (36*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (36*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (18 + (2*x0) + (36*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (19 + (2*x0) + (36*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + (1408*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x4 + (1312*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mj/cmjebu67gck3lekokhkcr4wje4hidgwarthkksmczmgslosa2nto.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.view]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1
# x_2 => view
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%getitem_2, [-1, 576]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_view_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 576
x1 = (xindex // 576)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((9*(((x0 + (576*x1)) // 9) % 144)) + (1312*((x0 + (576*x1)) // 1296)) + (x0 % 9)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
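
# Illustrative note: the view kernel above copies the padded (stride-1312)
# pooled buffer into a contiguous (9, 576) matrix so the following mm() can
# treat the flattened features as plain rows.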
# kernel path: runs/run_shard_7/inductor_cache/4p/c4pqt5y36x352qxu2w3svdkfwdex3nqgzyn4p4xrylhy7rcwa4kv.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/km/ckmzedqoppvjumcpvuwr7rlbwsu5t5uf452zjvb7mms5glmirjlz.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nb/cnbkypudeeaehvy2mn47mbxxwb3hyvmwnhalcgzbw24i6navjw75.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_6 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_7 = async_compile.triton('triton_poi_fused__softmax_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
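
# A minimal eager-mode reference for the fused, numerically stable softmax
# above (this helper is illustrative only): subtract the row max before
# exponentiating so exp() cannot overflow, then normalize by the row sum.
# The kernel specializes this to rows of length 2.
def _softmax_reference(x):
    m = x.max(dim=1, keepdim=True).values
    e = (x - m).exp()
    return e / e.sum(dim=1, keepdim=True)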
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 576), (576, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (2, 84), (84, 1))
assert_size_stride(primals_11, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 44, 44), (11616, 1936, 44, 1))
buf1 = empty_strided_cuda((4, 6, 44, 44), (11712, 1952, 44, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 46464, grid=grid(46464), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 22, 22), (2912, 484, 22, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 22, 22), (2944, 484, 22, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 11616, grid=grid(11616), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 18, 18), (5184, 324, 18, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 20736, grid=grid(20736), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 9, 9), (1408, 81, 9, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 9, 9), (1312, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 5184, grid=grid(5184), stream=stream0)
buf8 = empty_strided_cuda((9, 576), (576, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.view]
triton_poi_fused_max_pool2d_with_indices_view_4.run(buf7, buf8, 5184, grid=grid(5184), stream=stream0)
del buf7
buf9 = empty_strided_cuda((9, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (576, 120), (1, 576), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf10, primals_7, 1080, grid=grid(1080), stream=stream0)
del primals_7
buf11 = empty_strided_cuda((9, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf12, primals_9, 756, grid=grid(756), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((9, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(primals_10, (84, 2), (1, 84), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((9, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf13, buf14, 18, grid=grid(18), stream=stream0)
del buf13
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, buf10, buf12, buf14, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 48, 48), (2304, 2304, 48, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 576), (576, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 6 * 6, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
        # For 48x48 inputs the feature map here is 16 x 9 x 9 (1296 values),
        # not 16 x 6 x 6, so this view folds spatial positions into the batch
        # dimension: 4 images become 9 rows of 576, matching the (9, 576)
        # buffers in the compiled graph.
        x = x.view(-1, 16 * 6 * 6)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
x = F.softmax(x, 1)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
@staticmethod
def transform(png_picture):
        # Grayscale() yields a single channel, so Normalize takes one-element
        # mean/std tuples; three-element tuples fail to broadcast against a
        # 1-channel tensor.
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ])
return transform(png_picture)
def get_inputs():
return [torch.rand([4, 1, 48, 48])]
def get_init_inputs():
return [[], {}]
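
# A hedged shape trace for the network above (illustrative, not from the
# source repo). The view(-1, 576) on a 16x9x9 feature map folds spatial
# positions into the batch dimension, so 4 input images yield 9 output rows.
if __name__ == '__main__':
    net = Net()
    out = net(torch.rand([4, 1, 48, 48]))
    print(out.shape)  # torch.Size([9, 2]); each row sums to 1 (softmax)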
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torchvision.transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 46464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1936 % 6
x0 = xindex % 1936
x4 = xindex // 1936
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 1952 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 11616
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 22
x1 = xindex // 22 % 22
x4 = xindex // 484
x3 = xindex // 2904
x5 = xindex % 2904
tmp0 = tl.load(in_ptr0 + (2 * x0 + 88 * x1 + 1952 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 88 * x1 + 1952 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (44 + 2 * x0 + 88 * x1 + 1952 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (45 + 2 * x0 + 88 * x1 + 1952 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 2912 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 2944 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 20736
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 324 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x3 = xindex // 9
x2 = xindex // 1296
x4 = xindex % 1296
tmp0 = tl.load(in_ptr0 + (2 * x0 + 36 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 36 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (18 + 2 * x0 + 36 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (19 + 2 * x0 + 36 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + 1408 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 1312 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 576
x1 = xindex // 576
x2 = xindex
tmp0 = tl.load(in_ptr0 + (9 * ((x0 + 576 * x1) // 9 % 144) + 1312 * ((
x0 + 576 * x1) // 1296) + x0 % 9), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 576), (576, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (2, 84), (84, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 44, 44), (11616, 1936, 44, 1))
buf1 = empty_strided_cuda((4, 6, 44, 44), (11712, 1952, 44, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(46464)](buf0, primals_2,
buf1, 46464, XBLOCK=512, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 22, 22), (2912, 484, 22, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 22, 22), (2944, 484, 22, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(11616)](buf1, buf2,
buf3, 11616, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 18, 18), (5184, 324, 18, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(20736)](buf5, primals_5,
20736, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 9, 9), (1408, 81, 9, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 9, 9), (1312, 81, 9, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(5184)](buf5, buf6,
buf7, 5184, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((9, 576), (576, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_view_4[grid(5184)](buf7,
buf8, 5184, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((9, 120), (120, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (576, 120), (
1, 576), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(1080)](buf10, primals_7, 1080, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((9, 84), (84, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (120, 84), (
1, 120), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_6[grid(756)](buf12, primals_9, 756, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf13 = empty_strided_cuda((9, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(
primals_10, (84, 2), (1, 84), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((9, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_7[grid(18)](buf13, buf14, 18, XBLOCK=32,
num_warps=1, num_stages=1)
del buf13
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf12, buf14, primals_10, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 6 * 6, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 2)
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
@staticmethod
def transform(png_picture):
        # Grayscale() yields a single channel, so Normalize takes one-element
        # mean/std tuples; three-element tuples fail to broadcast against a
        # 1-channel tensor.
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ])
return transform(png_picture)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| Antloup/Deep-large-picture-database-indexing | Net | false | 2,460 | [
"MIT"
] | 0 | ac5368805a29376f54eba0657550d73e4739a235 | https://github.com/Antloup/Deep-large-picture-database-indexing/tree/ac5368805a29376f54eba0657550d73e4739a235 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 6 * 6, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
        # With 48x48 inputs this reshapes a 16x9x9 map into 9 rows of 576,
        # folding spatial positions into the batch dimension.
        x = x.view(-1, 16 * 6 * 6)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
x = F.softmax(x, 1)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
@staticmethod
def transform(png_picture):
        # Grayscale() yields a single channel, so Normalize takes one-element
        # mean/std tuples; three-element tuples fail to broadcast against a
        # 1-channel tensor.
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ])
return transform(png_picture)
def get_inputs():
return [torch.rand([4, 1, 48, 48])]
def get_init_inputs():
return []
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3o/c3omxmuz7z47nwbr4kf355o77ktfwdgwifxgkfjgy4fxgfvs6li2.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8000
x1 = (xindex // 8000)
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.0005
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + (4*x1)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wk/cwkquaubg7bycviyjocnpyd63eqqtvlh3co55pa6xiekmyan6tzq.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_3 => add_2, add_3, convert_element_type_2, convert_element_type_3, iota_1, mul_2, mul_3
# Graph fragment:
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16000,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_2, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %convert_element_type_3 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_3, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
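
# Hedged sketch of the nearest-neighbor rule both upsampling kernels in this
# module implement (the helper below is ours, for illustration): output
# position i reads input position floor(i * in_len / out_len); the 0.0005
# scale in the first kernel is 4/8000 and the 0.5 scale above is 8000/16000.
def _nn_upsample_index(i, in_len, out_len):
    return int(i * in_len / out_len)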
# kernel path: runs/run_shard_7/inductor_cache/va/cvazkafi2zyut7imzq3bcu2zfn2jkkgrxntomq6rqzpsbkqjhpzq.py
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# x_3 => _unsafe_index_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %convert_element_type_3]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16000
x3 = (xindex // 16000)
x1 = (xindex // 16000) % 64
x4 = xindex
tmp6 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + (8000*x3)), None, eviction_policy='evict_last')
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tl.store(out_ptr0 + (x4), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fw/cfwbdfgwfmnbirs4qwzo2rnkvtw7eufclonbzk7ha42izbalye7k.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# x_4 => convolution_1
# x_5 => tanh
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [1], [1], [1], False, [0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_tanh_3 = async_compile.triton('triton_poi_fused_convolution_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
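
# Illustrative note: the fused tanh above bounds every output sample to
# (-1, 1), the conventional range for normalized audio waveforms.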
# kernel path: runs/run_shard_7/inductor_cache/pr/cprpnxhcfpuhken5i2sbbjssc43itanxgwa6m3qyev5ih6o3f4mj.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 8000) % 64
x0 = xindex % 8000
x4 = (xindex // 8000)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (8064*x4)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4), (512, 4, 1))
assert_size_stride(primals_2, (64, 128, 3), (384, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (1, 64, 3), (192, 3, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128, 8000), (1024000, 8000, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 4096000, grid=grid(4096000), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 8000), (512000, 8000, 1))
buf2 = empty_strided_cuda((16000, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 16000, grid=grid(16000), stream=stream0)
buf3 = empty_strided_cuda((4, 64, 16000), (1024000, 16000, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_2.run(buf1, primals_3, buf3, 4096000, grid=grid(4096000), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 16000), (16000, 16000, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_3.run(buf5, primals_5, 64000, grid=grid(64000), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 64, 8000), (516096, 8064, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf1, primals_3, buf6, 2048000, grid=grid(2048000), stream=stream0)
del buf1
del primals_3
return (buf5, primals_2, primals_4, buf0, buf2, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 128, 4), (512, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 128, 3), (384, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 64, 3), (192, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(Decoder, self).__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, x):
x = self.upsa1(x)
x = self.conv3(x)
x = torch.relu(x)
x = self.upsa2(x)
x = self.conv4(x)
x = torch.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 128, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
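# Kernel 0 -- upsa1: nearest-neighbor upsample along the last dim, 4 -> 8000.
# Output position x0 maps back to source index int(x0 * 0.0005) = int(x0 * 4 / 8000),
# so each input sample is repeated 2000 times.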
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
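    # note: the bare tl.full([XBLOCK], True, tl.int1) below is a leftover
    # constant-true mask -- a no-op here and in the other full-grid kernels in
    # this file (the launch grid covers xnumel exactly, so no bounds check is needed)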
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8000
x1 = xindex // 8000
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.0005
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * x1), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x2, tmp5, None)
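# Kernel 1 -- precomputes the int64 gather indices for upsa2 (idx[i] = int(i * 0.5),
# i in [0, 16000)). call() returns this table for autograd; the fused kernel below
# recomputes the same indices inline.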
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
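# Kernel 2 -- fuses conv3's bias add, ReLU, and upsa2 (nearest-neighbor upsample
# 8000 -> 16000): gather the conv output at int(x0 * 0.5), add the per-channel
# bias, then clamp at zero.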
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16000
x3 = xindex // 16000
x1 = xindex // 16000 % 64
x4 = xindex
tmp6 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = tl.load(in_ptr0 + (tmp4 + 8000 * x3), None, eviction_policy=
'evict_last')
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tl.store(out_ptr0 + x4, tmp9, None)
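# Kernel 3 -- in-place epilogue for conv4: broadcast the single output-channel
# bias (in_ptr0[0]) over all 4 * 1 * 16000 elements, then apply tanh.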
@triton.jit
def triton_poi_fused_convolution_tanh_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
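# Kernel 4 -- recomputes conv3 + bias + ReLU and stores the boolean
# (activation <= 0) mask consumed by ReLU's threshold_backward; rows are padded
# from 8000 elements to a stride of 8064 for alignment.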
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 8000 % 64
x0 = xindex % 8000
x4 = xindex // 8000
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 8064 * x4), tmp6, None)
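# call() replays the traced forward pass: upsample -> conv3 (extern convolution) ->
# fused ReLU + upsample -> conv4 -> tanh, returning the waveform plus the tensors
# saved for the backward pass.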
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4), (512, 4, 1))
assert_size_stride(primals_2, (64, 128, 3), (384, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (1, 64, 3), (192, 3, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128, 8000), (1024000, 8000, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(4096000)](primals_1, buf0,
4096000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 8000), (512000, 8000, 1))
buf2 = empty_strided_cuda((16000,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(16000)](buf2, 16000,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 64, 16000), (1024000, 16000, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_relu_2[grid(4096000)](buf1,
primals_3, buf3, 4096000, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 16000), (16000, 16000, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_tanh_3[grid(64000)](buf5, primals_5,
64000, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 8000), (516096, 8064, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(2048000)](
buf1, primals_3, buf6, 2048000, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf1
del primals_3
return buf5, primals_2, primals_4, buf0, buf2, buf3, buf5, buf6
class DecoderNew(nn.Module):
def __init__(self, sampling_rate=16000.0):
super(DecoderNew, self).__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, input_0):
primals_2 = self.conv3.weight
primals_3 = self.conv3.bias
primals_4 = self.conv4.weight
primals_5 = self.conv4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
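# Usage sketch (illustrative assumption, not part of the export): DecoderNew is a
# drop-in replacement for the eager Decoder, e.g.
#   model = DecoderNew().cuda()
#   wave = model(torch.rand(4, 128, 4, device='cuda'))  # -> (4, 1, 16000)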
| Koukyosyumei/Zatsuon | Decoder | false | 2,461 | [
"Apache-2.0"
] | 0 | d7f520a282cf00bfd19d2dec300701c21403cba1 | https://github.com/Koukyosyumei/Zatsuon/tree/d7f520a282cf00bfd19d2dec300701c21403cba1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, sampling_rate=16000.0):
super().__init__()
self.sampling_rate = sampling_rate
self.upsa1 = torch.nn.Upsample(int(sampling_rate / 2))
self.conv3 = torch.nn.Conv1d(128, 64, 3, padding=1)
self.upsa2 = torch.nn.Upsample(int(sampling_rate))
self.conv4 = torch.nn.Conv1d(64, 1, 3, padding=1)
def forward(self, x):
x = self.upsa1(x)
x = self.conv3(x)
x = torch.relu(x)
x = self.upsa2(x)
x = self.conv4(x)
x = torch.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 128, 4])]
def get_init_inputs():
return []
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return (buf6, buf0, buf2, buf4, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super(Critic, self).__init__()
self.linear1 = nn.Linear(n_obs + output_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_obs': 4, 'output_dim': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
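    # the bare tl.full(...) constants below (0 and 8) are leftover cat bounds
    # whose always-true comparisons were pruned; they are no-ops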
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
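# Kernel -- shared in-place bias-add + ReLU epilogue for both hidden linear
# layers (the matmuls themselves run through extern_kernels.mm).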
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
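# call() computes cat -> linear1 + ReLU -> linear2 + ReLU -> linear3 (addmm),
# returning the (4, 1) critic value plus the intermediates saved for backward.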
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class CriticNew(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super(CriticNew, self).__init__()
self.linear1 = nn.Linear(n_obs + output_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_1 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
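# Usage sketch (illustrative assumption, not part of the export):
#   critic = CriticNew(n_obs=4, output_dim=4, hidden_size=4).cuda()
#   q = critic(torch.rand(4, 4, device='cuda'),
#              torch.rand(4, 4, device='cuda'))  # -> (4, 1)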
| KhalilWong/Learn-RL | Critic | false | 2,462 | [
"MIT"
] | 0 | 9f63c5adafab1413362366d28d8711096ce6648c | https://github.com/KhalilWong/Learn-RL/tree/9f63c5adafab1413362366d28d8711096ce6648c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_obs, output_dim, hidden_size, init_w=0.003):
super().__init__()
self.linear1 = nn.Linear(n_obs + output_dim, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|