entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars) | pytorch_code (string, 200–4.05k chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|
Pooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/je/cje3udpfj5nwaq4lakytobuhk4kwgcmkt62m7sy65mea2frdue7u.py
# Topologically Sorted Source Nodes: [avg_pool2d, sub], Original ATen: [aten.avg_pool2d, aten.sub]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# sub => sub
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [3, 3], [1, 1], [1, 1], False, False), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d, %arg0_1), kwargs = {})
triton_poi_fused_avg_pool2d_sub_0 = async_compile.triton('triton_poi_fused_avg_pool2d_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
tmp54 = tl.load(in_ptr0 + (x3), xmask)
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + x3), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + x3), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + x3), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + x3), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x3), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
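    # tmp52 below is the count_include_pad=False divisor: the number of
    # in-bounds taps in the 3x3 window at (x0, x1), i.e.
    # (min(4, x0 + 2) - max(0, x0 - 1)) * (min(4, x1 + 2) - max(0, x1 - 1)),
    # which Inductor expands into branch-free min/max arithmetic.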
tmp52 = (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))*((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) + (((4) * ((4) <= (2 + x0)) + (2 + x0) * ((2 + x0) < (4)))*((4) * ((4) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))*((4) * ((4) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))*((4) * ((4) <= (2 + x0)) + (2 + x0) * ((2 + x0) < (4))))
tmp53 = tmp51 / tmp52
tmp55 = tmp53 - tmp54
tl.store(in_out_ptr0 + (x3), tmp55, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [avg_pool2d, sub], Original ATen: [aten.avg_pool2d, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_sub_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
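# Minimal eager-mode equivalence check (a sketch; the helper name below is
# assumed, not part of the source repo): Pooling() should match a direct
# avg_pool2d minus the identity.
def _check_pooling_sketch():
    import torch.nn.functional as F
    x = torch.rand([4, 4, 4, 4])
    expected = F.avg_pool2d(x, 3, stride=1, padding=1,
        count_include_pad=False) - x
    assert torch.allclose(Pooling()(x), expected)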
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
tmp54 = tl.load(in_ptr0 + x3, xmask)
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -
1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (4 * (4 <= 2 + x0) + (2 + x0
) * (2 + x0 < 4)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4)
) + -1 * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (4 * (4 <=
2 + x1) + (2 + x1) * (2 + x1 < 4)) + -1 * (0 * (0 >= -1 + x1) + (-1 +
x1) * (-1 + x1 > 0)) * (4 * (4 <= 2 + x0) + (2 + x0) * (2 + x0 < 4))
tmp53 = tmp51 / tmp52
tmp55 = tmp53 - tmp54
tl.store(in_out_ptr0 + x3, tmp55, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
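        # buf1 is buf0 renamed: the kernel writes the fused avg_pool2d(x) - x
        # result in place through its in_out_ptr0 argument.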
get_raw_stream(0)
triton_poi_fused_avg_pool2d_sub_0[grid(256)](buf1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class PoolingNew(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TranNhiem/MA_SSRL_Pytorch | Pooling | false | 1,141 | ["MIT"] | 0 | 87d946461850240fdd54de761603f13ef3710c2b | https://github.com/TranNhiem/MA_SSRL_Pytorch/tree/87d946461850240fdd54de761603f13ef3710c2b | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TwoWordBilinearLabelProbe | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/v2/cv26xmd6uhnpasjiayrxny6gmnwndswt7vj57asgoudfza6xzwle.py
# Topologically Sorted Source Nodes: [contiguous_1, matmul], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# contiguous_1 => clone_2
# matmul => view_1
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
# %view_1 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [64, 4]), kwargs = {})
triton_poi_fused_clone_view_0 = async_compile.triton('triton_poi_fused_clone_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7q/c7qgwbp7h2p2gm2mjyswszqjvgaiq3hw6ul3ljm3kx5kacqyu56x.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/nt/cntmnffcozkeu7dmnap4ub3giwedzhutvy2zd57fhzbmevu6xzld.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_4), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [proj], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_3, out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1, matmul], Original ATen: [aten.clone, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_view_0.run(primals_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf3, 256, grid=grid(256), stream=stream0)
del primals_1
buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4)
del buf2
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf5, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
class TwoWordBilinearLabelProbe(nn.Module):
""" Computes a bilinear function of pairs of vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, prob, device):
super(TwoWordBilinearLabelProbe, self).__init__()
self.maximum_rank = rank
self.model_dim = model_dim
self.proj_L = nn.Parameter(data=torch.zeros(self.model_dim, self.
maximum_rank))
self.proj_R = nn.Parameter(data=torch.zeros(self.maximum_rank, self
.model_dim))
self.bias = nn.Parameter(data=torch.zeros(1))
nn.init.uniform_(self.proj_L, -0.05, 0.05)
nn.init.uniform_(self.proj_R, -0.05, 0.05)
nn.init.uniform_(self.bias, -0.05, 0.05)
self
self.dropout = nn.Dropout(p=prob)
def forward(self, batch):
""" Computes all n^2 pairs of attachment scores
for each sentence in a batch.
Computes h_i^T A h_j for all i, j
where A = LR, L in R^{model_dim x maximum_rank}; R in R^{maximum_rank x model_dim}
hence A is rank-constrained to maximum_rank.
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of scores of shape (batch_size, max_seq_len, max_seq_len)
"""
batchlen, seqlen, rank = batch.size()
batch = self.dropout(batch)
proj = torch.mm(self.proj_L, self.proj_R)
batch_square = batch.unsqueeze(2).expand(batchlen, seqlen, seqlen, rank
)
batch_transposed = batch.unsqueeze(1).expand(batchlen, seqlen,
seqlen, rank).contiguous().view(batchlen * seqlen * seqlen, rank, 1
)
psd_transformed = torch.matmul(batch_square.contiguous(), proj).view(
batchlen * seqlen * seqlen, 1, rank)
logits = (torch.bmm(psd_transformed, batch_transposed) + self.bias
).view(batchlen, seqlen, seqlen)
return logits
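# Equivalent closed form (a sketch, not from the original repo): since the
# bilinear map is A = proj_L @ proj_R, the scores above can also be written as
#   logits = torch.einsum('bid,de,bje->bij',
#                         batch, self.proj_L @ self.proj_R, batch) + self.bias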
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4, 'rank': 4, 'prob': 0.5, 'device': 0}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_3, out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_view_0[grid(256)](primals_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 4, 1),
0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4)
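        # This bmm performs 64 = batchlen * seqlen^2 independent (1x4)@(4x1)
        # dot products, one h_i^T A h_j score per (b, i, j) triple, using
        # stride reinterpretation instead of materialized transposes.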
del buf2
buf5 = buf4
del buf4
triton_poi_fused_add_2[grid(64)](buf5, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
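    # Besides the logits (output[0]), the remaining reinterpret_tensor views
    # are tensors saved for the backward pass of this '0_forward' graph.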
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0)
class TwoWordBilinearLabelProbeNew(nn.Module):
""" Computes a bilinear function of pairs of vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, prob, device):
super(TwoWordBilinearLabelProbeNew, self).__init__()
self.maximum_rank = rank
self.model_dim = model_dim
self.proj_L = nn.Parameter(data=torch.zeros(self.model_dim, self.
maximum_rank))
self.proj_R = nn.Parameter(data=torch.zeros(self.maximum_rank, self
.model_dim))
self.bias = nn.Parameter(data=torch.zeros(1))
nn.init.uniform_(self.proj_L, -0.05, 0.05)
nn.init.uniform_(self.proj_R, -0.05, 0.05)
nn.init.uniform_(self.bias, -0.05, 0.05)
self
self.dropout = nn.Dropout(p=prob)
def forward(self, input_0):
primals_2 = self.proj_L
primals_3 = self.proj_R
primals_4 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| TimO96/NLP2 | TwoWordBilinearLabelProbe | false | 1,142 | ["MIT"] | 0 | 83f65a385457f68397c641f38b53df0110282578 | https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
class Model(nn.Module):
""" Computes a bilinear function of pairs of vectors.
For a batch of sentences, computes all n^2 pairs of scores
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, prob, device):
super().__init__()
self.maximum_rank = rank
self.model_dim = model_dim
self.proj_L = nn.Parameter(data=torch.zeros(self.model_dim, self.
maximum_rank))
self.proj_R = nn.Parameter(data=torch.zeros(self.maximum_rank, self
.model_dim))
self.bias = nn.Parameter(data=torch.zeros(1))
nn.init.uniform_(self.proj_L, -0.05, 0.05)
nn.init.uniform_(self.proj_R, -0.05, 0.05)
nn.init.uniform_(self.bias, -0.05, 0.05)
self
self.dropout = nn.Dropout(p=prob)
def forward(self, batch):
""" Computes all n^2 pairs of attachment scores
for each sentence in a batch.
Computes h_i^T A h_j for all i, j
where A = LR, L in R^{model_dim x maximum_rank}; R in R^{maximum_rank x model_dim}
hence A is rank-constrained to maximum_rank.
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of scores of shape (batch_size, max_seq_len, max_seq_len)
"""
batchlen, seqlen, rank = batch.size()
batch = self.dropout(batch)
proj = torch.mm(self.proj_L, self.proj_R)
batch_square = batch.unsqueeze(2).expand(batchlen, seqlen, seqlen, rank
)
batch_transposed = batch.unsqueeze(1).expand(batchlen, seqlen,
seqlen, rank).contiguous().view(batchlen * seqlen * seqlen, rank, 1
)
psd_transformed = torch.matmul(batch_square.contiguous(), proj).view(
batchlen * seqlen * seqlen, 1, rank)
logits = (torch.bmm(psd_transformed, batch_transposed) + self.bias
).view(batchlen, seqlen, seqlen)
return logits
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5, 0]
|
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
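    # tmp4 is relu(x @ W + bias); tmp6 is the (activation <= 0) mask saved for
    # autograd's threshold_backward, hence the fused kernel name.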
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0)
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class Policy(nn.Module):
def __init__(self, act_dim, obs_dim):
super(Policy, self).__init__()
self.fc0 = nn.Linear(act_dim, 128)
self.fc1 = nn.Linear(128, obs_dim)
def forward(self, x):
x = x.type_as(self.fc0.bias)
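        # Cast the input to the same tensor type (dtype/device class) as the
        # first layer's parameters before the matmul.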
x = F.relu(self.fc0(x))
x = F.dropout(x, training=self.training)
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'act_dim': 4, 'obs_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (128, 4), (4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0)
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3
class PolicyNew(nn.Module):
def __init__(self, act_dim, obs_dim):
super(PolicyNew, self).__init__()
self.fc0 = nn.Linear(act_dim, 128)
self.fc1 = nn.Linear(128, obs_dim)
def forward(self, input_0):
primals_3 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| TommeyChang/CS294-Homework | Policy | false | 1,143 | ["MIT"] | 0 | 17b525bf4366034b45c4febd89f1053d44550237 | https://github.com/TommeyChang/CS294-Homework/tree/17b525bf4366034b45c4febd89f1053d44550237 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, act_dim, obs_dim):
super().__init__()
self.fc0 = nn.Linear(act_dim, 128)
self.fc1 = nn.Linear(128, obs_dim)
def forward(self, x):
x = x.type_as(self.fc0.bias)
x = F.relu(self.fc0(x))
x = F.dropout(x, training=self.training)
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/65/c65frvogjvzvcjnoj7n72ziopkhhgusygsvovz7h4ukukiilkzeo.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ig/cigklgjvndpfvc7rkzbq2ej3kvwcxbbuyizweiydfjy7unf2pgnl.py
# Topologically Sorted Source Nodes: [label_batch, cross_entropy], Original ATen: [aten._to_copy, aten.nll_loss_forward]
# Source node to ATen node mapping:
# cross_entropy => full_default_1, ne_2, neg, sum_4, where_1
# label_batch => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg2_1, torch.int64), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%convert_element_type_1, -1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_2, %neg, %full_default_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
triton_per_fused__to_copy_nll_loss_forward_1 = async_compile.triton('triton_per_fused__to_copy_nll_loss_forward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_nll_loss_forward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_nll_loss_forward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp12 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([1, 1], -1, tl.int64)
tmp3 = tmp1 != tmp2
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.where(tmp3, tmp1, tmp4)
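    # ignore_index=-1 handling: tmp3 flags valid labels; invalid ones are
    # remapped to a safe index 0 for the gather, and tmp27 below zeroes their
    # loss contribution via tl.where.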
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4), "index out of bounds: 0 <= tmp9 < 4")
tmp11 = tl.load(in_ptr1 + (tmp9 + (4*r0)), None, eviction_policy='evict_last')
tmp13 = tl_math.exp(tmp12)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp11 - tmp23
tmp25 = -tmp24
tmp26 = 0.0
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kd/ckd7oru5le43u4lcqhsj4wqfgosdbs3zxvplwfqb4rmznyztvnrb.py
# Topologically Sorted Source Nodes: [ne, sum_1, total_sents, cross_entropy_loss], Original ATen: [aten.ne, aten.sum, aten._to_copy, aten.div]
# Source node to ATen node mapping:
# cross_entropy_loss => div
# ne => ne
# sum_1 => sum_1
# total_sents => convert_element_type
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne,), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, %convert_element_type), kwargs = {})
triton_per_fused__to_copy_div_ne_sum_2 = async_compile.triton('triton_per_fused__to_copy_div_ne_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_ne_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_ne_sum_2(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_out_ptr0 + (0))
tmp9 = tl.broadcast_to(tmp8, [1])
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6.to(tl.float32)
tmp10 = tmp9 / tmp7
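    # tmp7 counts the nonzero sentence lengths (total_sents); the summed NLL
    # already held in in_out_ptr0 is normalized by it and written back in place.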
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [label_batch, cross_entropy], Original ATen: [aten._to_copy, aten.nll_loss_forward]
triton_per_fused__to_copy_nll_loss_forward_1.run(arg2_1, buf0, buf1, 1, 16, grid=grid(1), stream=stream0)
del arg2_1
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ne, sum_1, total_sents, cross_entropy_loss], Original ATen: [aten.ne, aten.sum, aten._to_copy, aten.div]
triton_per_fused__to_copy_div_ne_sum_2.run(buf4, arg1_1, buf3, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
return (buf4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
class CrossEntropyLoss(nn.Module):
"""Custom cross-entropy loss"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.pytorch_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1,
reduction='sum')
def forward(self, predictions, label_batch, length_batch):
"""
Computes and returns CrossEntropyLoss.
Ignores all entries where label_batch=-1
Normalizes by the number of sentences in the batch.
Args:
predictions: A pytorch batch of logits
label_batch: A pytorch batch of label indices
length_batch: A pytorch batch of sentence lengths
Returns:
A tuple of:
cross_entropy_loss: average loss in the batch
total_sents: number of sentences in the batch
"""
batchlen, seqlen, class_count = predictions.size()
total_sents = torch.sum(length_batch != 0).float()
predictions = predictions.view(batchlen * seqlen, class_count)
label_batch = label_batch.view(batchlen * seqlen).long()
cross_entropy_loss = self.pytorch_ce_loss(predictions, label_batch
) / total_sents
return cross_entropy_loss, total_sents
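# Worked example (illustrative values, assumed): with 4 sentences of equal
# weight, the sum-reduced token losses (labels of -1 skipped via ignore_index)
# are divided by total_sents = 4, giving a per-sentence average:
#   loss, n = CrossEntropyLoss()(torch.rand(4, 4, 4),
#                                torch.randint(-1, 4, (4, 4)).float(),
#                                torch.ones(4))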
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([16]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__to_copy_nll_loss_forward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
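    # Reduces a masked NLL over the 16 flattened (batch * seq) positions:
    # labels equal to ignore_index (-1) contribute 0; for valid labels the
    # shifted logit is gathered and log(sum(exp(row))) is subtracted to
    # complete the log-softmax before negation and summation.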
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp12 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([1, 1], -1, tl.int64)
tmp3 = tmp1 != tmp2
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.where(tmp3, tmp1, tmp4)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4),
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr1 + (tmp9 + 4 * r0), None, eviction_policy=
'evict_last')
tmp13 = tl_math.exp(tmp12)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tmp11 - tmp23
tmp25 = -tmp24
tmp26 = 0.0
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
@triton.jit
def triton_per_fused__to_copy_div_ne_sum_2(in_out_ptr0, in_ptr0, out_ptr1,
xnumel, rnumel):
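    # Counts the nonzero entries of length_batch (256 elements), writes that
    # count to out_ptr1, and divides the summed loss held in in_out_ptr0 by
    # it in place, yielding the per-sentence average.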
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_out_ptr0 + 0)
tmp9 = tl.broadcast_to(tmp8, [1])
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6.to(tl.float32)
tmp10 = tmp9 / tmp7
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_nll_loss_forward_1[grid(1)](arg2_1, buf0,
buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
triton_per_fused__to_copy_div_ne_sum_2[grid(1)](buf4, arg1_1, buf3,
1, 256, num_warps=2, num_stages=1)
del arg1_1
return buf4, buf3
class CrossEntropyLossNew(nn.Module):
"""Custom cross-entropy loss"""
def __init__(self):
super(CrossEntropyLossNew, self).__init__()
self.pytorch_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1,
reduction='sum')
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg2_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| TimO96/NLP2 | CrossEntropyLoss | false | 1,144 | [
"MIT"
] | 0 | 83f65a385457f68397c641f38b53df0110282578 | https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
class Model(nn.Module):
"""Custom cross-entropy loss"""
def __init__(self):
super().__init__()
self.pytorch_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1,
reduction='sum')
def forward(self, predictions, label_batch, length_batch):
"""
Computes and returns CrossEntropyLoss.
Ignores all entries where label_batch=-1
        Normalizes by the number of sentences in the batch.
Args:
predictions: A pytorch batch of logits
label_batch: A pytorch batch of label indices
length_batch: A pytorch batch of sentence lengths
Returns:
A tuple of:
cross_entropy_loss: average loss in the batch
total_sents: number of sentences in the batch
"""
batchlen, seqlen, class_count = predictions.size()
total_sents = torch.sum(length_batch != 0).float()
predictions = predictions.view(batchlen * seqlen, class_count)
label_batch = label_batch.view(batchlen * seqlen).long()
cross_entropy_loss = self.pytorch_ce_loss(predictions, label_batch
) / total_sents
return cross_entropy_loss, total_sents
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([16]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GlobalAttentionGeneral | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qi/cqinh332474qtv7bgen4bcfz2yfclns66jnudr7z7wmvlrgqoduc.py
# Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose]
# Source node to ATen node mapping:
# targetT => clone
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {})
triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + (4*x1) + (64*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pm/cpmy57yidxxfl6wmlh5dsizlsat4uz6k43rz6t4r6h2u4z625i5l.py
# Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_4 => clone_1
# Graph fragment:
# %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + ((4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1))
buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_transpose_0.run(primals_1, buf1, buf6, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [targetT, attn], Original ATen: [aten.clone, aten.bmm]
extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [weightedContext], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5)
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneral(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneral, self).__init__()
self.conv_context = conv1x1(cdf, idf)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context.size(0), context.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context.unsqueeze(3)
sourceT = self.conv_context(sourceT).squeeze(3)
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data.bool(), -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(sourceT, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
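# Hedged usage sketch added for illustration (not from the original repo):
# it runs the eager GlobalAttentionGeneral with randomly initialized weights
# and checks the documented output shapes; idf, cdf, and spatial sizes below
# are arbitrary choices.
def _example_attention_usage():
    attn_module = GlobalAttentionGeneral(idf=8, cdf=8)
    image_feats = torch.randn(2, 8, 4, 4)  # batch x idf x ih x iw
    word_feats = torch.randn(2, 8, 5)  # batch x cdf x sourceL
    weighted, attn = attn_module(image_feats, word_feats)
    assert weighted.shape == (2, 8, 4, 4)
    assert attn.shape == (2, 5, 4, 4)  # sourceL becomes the channel dim
    return weighted, attn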
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
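    # exp(x - rowmax) stage of a softmax over rows of 4; the division by the
    # row sum is fused into triton_poi_fused_clone_2 below.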
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
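    # Completes the softmax by dividing each exp by its row sum, while
    # permuting the attention map into (batch, sourceL, queryL) layout for
    # the following bmm.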
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4,
4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=
(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1))
buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1,
buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf2)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf4, out=buf5)
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1,
1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneralNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneralNew, self).__init__()
self.conv_context = conv1x1(cdf, idf)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1):
primals_3 = self.conv_context.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| Thesis-02F/Style-Attn | GlobalAttentionGeneral | false | 1,145 | [
"MIT"
] | 0 | 55f78de4858e395ebf9750a23923fd772600290f | https://github.com/Thesis-02F/Style-Attn/tree/55f78de4858e395ebf9750a23923fd772600290f | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class Model(nn.Module):
def __init__(self, idf, cdf):
super().__init__()
self.conv_context = conv1x1(cdf, idf)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context.size(0), context.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context.unsqueeze(3)
sourceT = self.conv_context(sourceT).squeeze(3)
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data.bool(), -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(sourceT, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Mlp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/tx/ctxovrltdhpfxjn2zu2smrgoqxlijsvlahl3ehyzgagcnkhtwqrh.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_convolution_gelu_0 = async_compile.triton('triton_poi_fused_convolution_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_2, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_gelu_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf4, primals_1, primals_3, primals_4, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class Mlp(nn.Module):
"""
    Implementation of MLP with 1x1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
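# Hedged usage sketch added for illustration (not part of the source repo):
# the Mlp block maps [B, C, H, W] -> [B, C, H, W], expanding channels to
# hidden_features internally; the sizes below are arbitrary.
def _example_mlp_usage():
    mlp = Mlp(in_features=16, hidden_features=32, drop=0.1)
    x = torch.randn(2, 16, 8, 8)
    y = mlp(x)
    assert y.shape == x.shape  # channel count returns to in_features
    return y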
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
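    # Fused bias add + exact GELU: y = 0.5 * x * (1 + erf(x / sqrt(2))),
    # where 0.7071067811865476 = 1/sqrt(2). The biased pre-activation is
    # written back to in_out_ptr0 so it can be reused by the backward pass.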
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_gelu_0[grid(256)](buf1, primals_2,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf2
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class MlpNew(nn.Module):
"""
    Implementation of MLP with 1x1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| TranNhiem/solo-learn | Mlp | false | 1,146 | [
"MIT"
] | 0 | 7539732b68d153087d09a26a23e1edfdc49bc086 | https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086 | import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class Model(nn.Module):
"""
    Implementation of MLP with 1x1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
TokenMixer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2o/c2oeyuwv7gxwqncpovtxrktvflvbh65akpqi3mvsjgsq3we4q3a7.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.gelu]
# Source node to ATen node mapping:
# x_1 => add
# x_2 => add_1, erf, mul, mul_1, mul_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_gelu_1 = async_compile.triton('triton_poi_fused_add_gelu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gelu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/dz/cdzzulj2qzikpeu5y24dznn2obh4nqag33ymhhfmhaiep723cu6g.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_5 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_3, %primals_1), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.gelu]
triton_poi_fused_add_gelu_1.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 16, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf4, primals_5, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf4, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class TokenMixer(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(TokenMixer, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, x):
input = x
x = torch.transpose(x, 1, 2)
x = self.fc1(x)
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
if self.dropout is not None:
x = self.dropout(x)
x = torch.transpose(x, 1, 2)
x = x + input
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
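    # Materializes torch.transpose(x, 1, 2) into a contiguous buffer; note the
    # swapped x1/x2 factors in the load index.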
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
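    # Fused fc1 bias add + exact GELU: out = 0.5 * x * (1 + erf(x / sqrt(2))),
    # where 0.7071067811865476 = 1 / sqrt(2).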
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
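    # Fused epilogue: adds the fc2 bias (in_ptr0) and the transposed residual
    # input (in_ptr1) to the matmul output in place, completing x + input.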
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_1[grid(256)](buf1, primals_3, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 16, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_5, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class TokenMixerNew(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(TokenMixerNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| TheRealMarVin/mlp-mixer | TokenMixer | false | 1,147 | [
"MIT"
] | 0 | 2124cb5c5adfc7af473cab535095471d4943adab | https://github.com/TheRealMarVin/mlp-mixer/tree/2124cb5c5adfc7af473cab535095471d4943adab | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, x):
input = x
x = torch.transpose(x, 1, 2)
x = self.fc1(x)
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
if self.dropout is not None:
x = self.dropout(x)
x = torch.transpose(x, 1, 2)
x = x + input
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SelfAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pw/cpwtsjylwmnmzrfjyguikuwbleavnwvse35uypvb2lu2crw6ztqm.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, truediv], Original ATen: [aten.sqrt, aten.div]
# Source node to ATen node mapping:
# truediv => div
# wrapped_sqrt => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %full_default), kwargs = {})
triton_poi_fused_div_sqrt_0 = async_compile.triton('triton_poi_fused_div_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wrapped_sqrt, truediv], Original ATen: [aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sqrt_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf1)
del arg0_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
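    # Scales the query by 1 / sqrt(key_dim); key_dim = 4 here, so the divisor
    # is the constant 2.0.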
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
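    # Softmax, pass 1: subtract the row-wise max over the last dimension
    # (size 4) for numerical stability, then exponentiate.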
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
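    # Softmax, pass 2: normalize each exponentiated value by its row sum.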
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sqrt_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf1
)
del arg0_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class SelfAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttentionNew, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| TranQuocTrinh/image_captioning | SelfAttention | false | 1,148 | [
"MIT"
] | 0 | 4c2d77426ba3b9fe9151a15a958320d5298aa190 | https://github.com/TranQuocTrinh/image_captioning/tree/4c2d77426ba3b9fe9151a15a958320d5298aa190 | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
AveragePoolingLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/l3/cl3qgtljwm55hj7prrlq32vnxhqj5elf2qeptwkrprrhumnm7twn.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AveragePoolingLayer(nn.Module):
"""Implements the average pooling layer.
    This layer can be used to downsample feature maps in the spatial domain.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
ksize = [self.scale_factor, self.scale_factor]
strides = [self.scale_factor, self.scale_factor]
return F.avg_pool2d(x, kernel_size=ksize, stride=strides, padding=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
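    # 2x2 average pooling with stride 2: sums the four elements of each window
    # and multiplies by 0.25.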
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AveragePoolingLayerNew(nn.Module):
"""Implements the average pooling layer.
    This layer can be used to downsample feature maps in the spatial domain.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Twizwei/idinvert_pytorch | AveragePoolingLayer | false | 1,149 | [
"MIT"
] | 0 | 11f1126aab517fbe32b488d92f6fdea339463d04 | https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Implements the average pooling layer.
    This layer can be used to downsample feature maps in the spatial domain.
"""
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
ksize = [self.scale_factor, self.scale_factor]
strides = [self.scale_factor, self.scale_factor]
return F.avg_pool2d(x, kernel_size=ksize, stride=strides, padding=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LayerNormChannel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnaeipqg4a3qzttb2l6zy5ng44vshk3lfmp25jc2er665hxsmw.py
# Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# sub => sub
# u => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xv/cxvd7z437vxfpacdwxehqwb4b4td64d2z3gex2vzjg46iitryrpn.py
# Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# x_1 => add_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_1.run(primals_2, buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNormChannel(nn.Module):
"""
    LayerNorm applied over the channel dimension only.
    Input: tensor of shape [B, C, H, W]
"""
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight.unsqueeze(-1).unsqueeze(-1) * x + self.bias.unsqueeze(
-1).unsqueeze(-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
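    # Channel mean and centering: averages the 4 channel values (stride 16
    # apart) at each spatial position and subtracts the mean from the input.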
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
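    # Computes the channel variance of the centered input, normalizes by
    # sqrt(var + 1e-05), then applies the per-channel weight (in_ptr0) and
    # bias (in_ptr2).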
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_2,
buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormChannelNew(nn.Module):
"""
    LayerNorm applied over the channel dimension only.
    Input: tensor of shape [B, C, H, W]
"""
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| TranNhiem/MA_SSRL_Pytorch | LayerNormChannel | false | 1,150 | [
"MIT"
] | 0 | 87d946461850240fdd54de761603f13ef3710c2b | https://github.com/TranNhiem/MA_SSRL_Pytorch/tree/87d946461850240fdd54de761603f13ef3710c2b | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    LayerNorm applied over the channel dimension only.
    Input: tensor of shape [B, C, H, W]
"""
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight.unsqueeze(-1).unsqueeze(-1) * x + self.bias.unsqueeze(
-1).unsqueeze(-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Whitening2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pz/cpz2ab5bxm2jrtozlay24l6x3u34xfoykzic62oanlphvhbhsmqq.py
# Topologically Sorted Source Nodes: [xn], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# xn => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_1, %view_1), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
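    # Centers the features: each element has the mean over the 4 batch samples
    # subtracted from it.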
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/26/c26zwauaaytxanohrf6qcchljjpi4s5jhwcytcndqna5l35tfj5a.py
# Topologically Sorted Source Nodes: [contiguous, T], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# T => view_2
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
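    # Transposes the centered (n, d) activations into a contiguous (d, n)
    # layout for the covariance matmul.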
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hp/chpqt2szom5jgqscppfvmprzb6esy645vi4ngfkax5vxbvcpriac.py
# Topologically Sorted Source Nodes: [f_cov, mul, eye, eye_1, mul_1, f_cov_shrinked], Original ATen: [aten.div, aten.mul, aten.eye, aten._to_copy, aten.add]
# Source node to ATen node mapping:
# eye => eq, full_default, full_default_1, iota_1, where
# eye_1 => device_put
# f_cov => div
# f_cov_shrinked => add
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cpu, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_2, %iota_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %device_put : [num_users=2] = call_function[target=torch.ops.prims.device_put.default](args = (%where, cuda:0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%device_put, 0.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused__to_copy_add_div_eye_mul_2 = async_compile.triton('triton_poi_fused__to_copy_add_div_eye_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_div_eye_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_div_eye_mul_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
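    # In-place epilogue: divides the Gram matrix by N - 1 (= 3) and mixes in
    # the identity (cov * 1.0 + I * 0.0) to form f_cov_shrinked; the identity
    # term vanishes here.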
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = x1
tmp6 = x0
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp3, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp4 + tmp10
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/am/camagq7vaatmgdqo7yb5xw5letqk3v3jhganlmizmzy3sx2xi7gu.py
# Topologically Sorted Source Nodes: [eye, eye_1], Original ATen: [aten.eye, aten._to_copy]
# Source node to ATen node mapping:
# eye => eq, full_default, full_default_1, iota_1, where
# eye_1 => device_put
# Graph fragment:
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cpu, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_2, %iota_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %device_put : [num_users=2] = call_function[target=torch.ops.prims.device_put.default](args = (%where, cuda:0), kwargs = {})
triton_poi_fused__to_copy_eye_3 = async_compile.triton('triton_poi_fused__to_copy_eye_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_eye_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_eye_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
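# The kernel above materializes torch.eye(4) directly on the GPU: it writes
# 1.0 wherever the row index (x1) equals the column index (x0) and 0.0
# elsewhere, replacing the host-side eye allocation plus device copy seen in
# the graph fragment.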
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [xn], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous, T], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_1.run(buf0, buf1, 4, 4, grid=grid(4, 4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [f_cov, mul, eye, eye_1, mul_1, f_cov_shrinked], Original ATen: [aten.div, aten.mul, aten.eye, aten._to_copy, aten.add]
triton_poi_fused__to_copy_add_div_eye_mul_2.run(buf3, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [f_cov, mul, eye, eye_1, mul_1, f_cov_shrinked, cholesky], Original ATen: [aten.div, aten.mul, aten.eye, aten._to_copy, aten.add, aten.cholesky]
buf4 = torch.ops.aten.cholesky.default(buf3)
buf5 = buf4
del buf4
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [eye, eye_1], Original ATen: [aten.eye, aten._to_copy]
triton_poi_fused__to_copy_eye_3.run(buf6, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [eye, eye_1, triangular_solve], Original ATen: [aten.eye, aten._to_copy, aten.triangular_solve]
buf7 = torch.ops.aten.triangular_solve.default(buf6, buf5, False)
del buf5
buf8 = buf7[0]
del buf7
buf10 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_view_1.run(buf8, buf10, 4, 4, grid=grid(4, 4), stream=stream0)
del buf8
# Topologically Sorted Source Nodes: [decorrelated], Original ATen: [aten.convolution]
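    # The whitening matrix inv_sqrt (buf10) is applied as a 1x1 convolution
    # over the centered features, so each output sample is inv_sqrt @ xn,
    # mirroring `decorrelated = conv2d(xn, inv_sqrt)` in the eager module.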
buf11 = extern_kernels.convolution(buf0, reinterpret_tensor(buf10, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf10
return (reinterpret_tensor(buf11, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
from torch.nn.functional import conv2d
class Whitening2d(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
            output_dim (int): number of dimensions of the projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2d, self).__init__()
self.output_dim = output_dim
self.eps = eps
@custom_fwd(cast_inputs=torch.float32)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Performs whitening using the Cholesky decomposition.
Args:
x (torch.Tensor): a batch or slice of projected features.
Returns:
torch.Tensor: a batch or slice of whitened features.
"""
x = x.unsqueeze(2).unsqueeze(3)
m = x.mean(0).view(self.output_dim, -1).mean(-1).view(1, -1, 1, 1)
xn = x - m
T = xn.permute(1, 0, 2, 3).contiguous().view(self.output_dim, -1)
f_cov = torch.mm(T, T.permute(1, 0)) / (T.shape[-1] - 1)
eye = torch.eye(self.output_dim).type(f_cov.type())
f_cov_shrinked = (1 - self.eps) * f_cov + self.eps * eye
inv_sqrt = torch.triangular_solve(eye, torch.cholesky(
f_cov_shrinked), upper=False)[0]
inv_sqrt = inv_sqrt.contiguous().view(self.output_dim, self.
output_dim, 1, 1)
decorrelated = conv2d(xn, inv_sqrt)
return decorrelated.squeeze(2).squeeze(2)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'output_dim': 4}]
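
# A minimal usage sketch (illustrative, not part of the original repo): after
# whitening, the sample covariance of the outputs should be close to the
# identity. Note that torch.cholesky and torch.triangular_solve are deprecated
# in recent PyTorch (torch.linalg.cholesky and torch.linalg.solve_triangular
# are the modern equivalents), so this may emit deprecation warnings.
if __name__ == "__main__":
    layer = Whitening2d(output_dim=4)
    feats = torch.randn(8, 4)  # batch size must be >= output_dim for Cholesky
    out = layer(feats)
    cov = out.t() @ out / (out.shape[0] - 1)
    print(torch.allclose(cov, torch.eye(4), atol=1e-3))  # expected: True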
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
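# triton_poi_fused_sub_0 centers the (4, 4) input: the four loads at x0,
# 4 + x0, 8 + x0 and 12 + x0 gather one feature column across all samples,
# their average is the batch mean, and tmp12 = x - mean reproduces
# `xn = x - m` from the eager forward (the divide by tmp10 = 1.0 is the
# degenerate inner .mean(-1) over a singleton dimension).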
@triton.jit
def triton_poi_fused_clone_view_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__to_copy_add_div_eye_mul_2(in_out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = x1
tmp6 = x0
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp3, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp4 + tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_eye_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_clone_view_1[grid(4, 4)](buf0, buf1, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf1
buf3 = buf2
del buf2
triton_poi_fused__to_copy_add_div_eye_mul_2[grid(16)](buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = torch.ops.aten.cholesky.default(buf3)
buf5 = buf4
del buf4
buf6 = buf3
del buf3
triton_poi_fused__to_copy_eye_3[grid(16)](buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = torch.ops.aten.triangular_solve.default(buf6, buf5, False)
del buf5
buf8 = buf7[0]
del buf7
buf10 = buf6
del buf6
triton_poi_fused_clone_view_1[grid(4, 4)](buf8, buf10, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf8
buf11 = extern_kernels.convolution(buf0, reinterpret_tensor(buf10,
(4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf10
return reinterpret_tensor(buf11, (4, 4), (4, 1), 0),
class Whitening2dNew(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
output_dim (int): number of dimension of projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2dNew, self).__init__()
self.output_dim = output_dim
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TranNhiem/solo-learn | Whitening2d | false | 1,151 | [
"MIT"
] | 0 | 7539732b68d153087d09a26a23e1edfdc49bc086 | https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086 | import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
from torch.nn.functional import conv2d
class Model(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
output_dim (int): number of dimension of projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super().__init__()
self.output_dim = output_dim
self.eps = eps
@custom_fwd(cast_inputs=torch.float32)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Performs whitening using the Cholesky decomposition.
Args:
x (torch.Tensor): a batch or slice of projected features.
Returns:
torch.Tensor: a batch or slice of whitened features.
"""
x = x.unsqueeze(2).unsqueeze(3)
m = x.mean(0).view(self.output_dim, -1).mean(-1).view(1, -1, 1, 1)
xn = x - m
T = xn.permute(1, 0, 2, 3).contiguous().view(self.output_dim, -1)
f_cov = torch.mm(T, T.permute(1, 0)) / (T.shape[-1] - 1)
eye = torch.eye(self.output_dim).type(f_cov.type())
f_cov_shrinked = (1 - self.eps) * f_cov + self.eps * eye
inv_sqrt = torch.triangular_solve(eye, torch.cholesky(
f_cov_shrinked), upper=False)[0]
inv_sqrt = inv_sqrt.contiguous().view(self.output_dim, self.
output_dim, 1, 1)
decorrelated = conv2d(xn, inv_sqrt)
return decorrelated.squeeze(2).squeeze(2)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
ClassificationModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cb/ccbgymnr2fvk43axzcuowohjalipdfn2nc4qqvidfjzuqhtxsj6g.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/c2/cc24idh7iumu54cabqtyf4bwq723mqt6nb4chiwnswjfaoolg4us.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 184320
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wj/cwjii5g7vcokiqucazdgsrvnsqad3q7z4gbxiwezolbw7o6ilfmr.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
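# The kernel above fuses the convolution bias-add with ReLU: x0 indexes the
# 256 output channels of the channels-last buffer, so each element receives
# its per-channel bias before max(0, .) is applied in place.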
# kernel path: runs/run_shard_6/inductor_cache/5d/c5denb5h7n772ohbbcrslg7gqns4fexacfz7q33ejdelmalo2d77.py
# Topologically Sorted Source Nodes: [out_10, contiguous], Original ATen: [aten.convolution, aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_2
# out_10 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_convolution_5 = async_compile.triton('triton_poi_fused_clone_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 46080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 720
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
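# The final fused kernel adds the output-layer bias and applies the sigmoid:
# the pre-activation (tmp2) is written back in place for reuse in backward,
# while the activated values (tmp3) fill the contiguous clone buffer whose
# (4, 4, 4, 9, 80) view is later reinterpreted as (batch, 144, 80).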
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (720, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (720, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((720, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_10, buf5, 184320, 9, grid=grid(184320, 9), stream=stream0)
del primals_10
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_2, 16384, grid=grid(16384), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_5, 16384, grid=grid(16384), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf11, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_9, 16384, grid=grid(16384), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 720, 4, 4), (11520, 1, 2880, 720))
buf15 = buf14; del buf14 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 9, 80), (11520, 2880, 720, 80, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_10, contiguous], Original ATen: [aten.convolution, aten.clone]
triton_poi_fused_clone_convolution_5.run(buf15, primals_11, buf16, 46080, grid=grid(46080), stream=stream0)
del primals_11
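    # buf16 is laid out as (batch, H, W, anchors, classes) = (4, 4, 4, 9, 80);
    # the reinterpret below flattens H * W * anchors = 4 * 4 * 9 = 144 rows of
    # 80 class scores per image, matching .view(batch, -1, num_classes).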
return (reinterpret_tensor(buf16, (4, 144, 80), (11520, 80, 1), 0), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((720, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((720, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout1)
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout2)
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.dropout1(out)
out = self.conv4(out)
out = self.act4(out)
out = self.dropout2(out)
out = self.output(out)
out = self.output_act(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.
num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
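
# Shape sanity sketch (illustrative, not part of the original repo): with a
# 4-channel 4x4 input, the default 9 anchors and 80 classes, the head emits
# one row of class scores per anchor position, i.e. 4 * 4 * 9 = 144 rows of
# length 80.
if __name__ == "__main__":
    model = ClassificationModel(num_features_in=4)
    scores = model(torch.rand(4, 4, 4, 4))
    print(scores.shape)  # torch.Size([4, 144, 80])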
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 46080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 720
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (720, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (720,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_6, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_8, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((720, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(184320, 9)](primals_10, buf5, 184320, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(16384)](buf7, primals_2,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(16384)](buf11, primals_7,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 720, 4, 4), (11520, 1, 2880, 720))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 9, 80), (11520, 2880, 720, 80,
1), torch.float32)
triton_poi_fused_clone_convolution_5[grid(46080)](buf15, primals_11,
buf16, 46080, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
return reinterpret_tensor(buf16, (4, 144, 80), (11520, 80, 1), 0
), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15
class ClassificationModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
super(ClassificationModelNew, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout1)
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout2)
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| TobyChen0106/DeepQ_Final_B05901170 | ClassificationModel | false | 1,152 | [
"Apache-2.0"
] | 0 | 808a224c01272726a051eb7b7bb9e1b28887716e | https://github.com/TobyChen0106/DeepQ_Final_B05901170/tree/808a224c01272726a051eb7b7bb9e1b28887716e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80,
prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
super().__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout1)
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout2)
self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.dropout1(out)
out = self.conv4(out)
out = self.act4(out)
out = self.dropout2(out)
out = self.output(out)
out = self.output_act(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.
num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vg/cvgidnhbzjfqodump67yyhbxdotu5ydihgvgmgmpsvm4p3qrqo4q.py
# Topologically Sorted Source Nodes: [sub, pow_1, dist], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# dist => sum_1
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (144 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (160 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (176 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
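# The kernel above evaluates ((a - p) ** 2).sum(dim=1) for the two halves
# produced by x.chunk(2, dim=0): each half of the (4, 4, 4, 4) input spans
# 128 floats, so the +128 offsets select the positive half p, and the
# +16/+32/+48 offsets walk the four channels being squared and accumulated.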
# kernel path: runs/run_shard_6/inductor_cache/j5/cj55xji4corldscu2fvwxw4osguzugsn2pfvgmlufmwg77e3mt6e.py
# Topologically Sorted Source Nodes: [mul, sub_1, sub_2, relu, mul_1, loss, mean, truediv], Original ATen: [aten.mul, aten.rsub, aten.relu, aten.add, aten.mean, aten.div]
# Source node to ATen node mapping:
# loss => add
# mean => mean
# mul => mul
# mul_1 => mul_1
# relu => relu
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sum_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %relu), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean, 2.0), kwargs = {})
triton_per_fused_add_div_mean_mul_relu_rsub_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_relu_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_relu_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_relu_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 32
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp3 - tmp1
tmp6 = tl.full([1, 1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 * tmp7
tmp9 = tmp2 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
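# The persistent reduction above fuses the remainder of the loss: r0 =
# rindex % 32 broadcasts the 32 squared distances against the 128 labels,
# each element computes y * dist + (1 - y) * relu(1.0 - dist) (margin = 1.0
# baked in), and the sum is divided by 128 and then by 2.0, matching
# loss.mean() / 2.0.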
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, dist], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0.run(arg0_1, buf0, 32, grid=grid(32), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, sub_1, sub_2, relu, mul_1, loss, mean, truediv], Original ATen: [aten.mul, aten.rsub, aten.relu, aten.add, aten.mean, aten.div]
triton_per_fused_add_div_mean_mul_relu_rsub_1.run(buf2, arg1_1, buf0, 1, 128, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
contrastive loss
    L2 distance:
        L(a1, a2, y) = y * d(a1, a2) + (1 - y) * max(0, m - d(a1, a2))
    cosine distance:
        L(a1, a2, y) = y * (1 - d(a1, a2)) + (1 - y) * max(0, d(a1, a2) - m)
    where y = 1 if (a1, a2) relevant else 0
"""
def __init__(self, margin=1.0, metric='l2'):
super().__init__()
self.margin = margin
self.metric = metric
metric_list = ['l2', 'cosine']
assert metric in metric_list, 'Error! contrastive metric %s not supported.' % metric
self.metric_id = metric_list.index(metric)
def forward(self, x, y):
a, p = x.chunk(2, dim=0)
if self.metric_id == 0:
dist = torch.sum((a - p) ** 2, dim=1)
loss = y * dist + (1 - y) * F.relu(self.margin - dist)
else:
dist = F.cosine_similarity(a, p)
loss = y * (1 - dist) + (1 - y) * F.relu(dist - self.margin)
return loss.mean() / 2.0
    def extra_repr(self) -> str:
return '?xD -> scalar (Loss)'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
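

if __name__ == "__main__":
    # Minimal usage sketch (illustrative shapes, not from the source repo):
    # the batch stacks N anchors on top of N positives, so x has 2N rows and
    # the label y broadcasts against the per-pair distance.
    criterion = ContrastiveLoss(margin=1.0, metric='l2')
    x = torch.randn(8, 16)                 # 4 anchors followed by 4 positives
    y = torch.randint(0, 2, (4,)).float()  # 1 = relevant pair, 0 = not
    print(criterion(x, y))                 # scalar loss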
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
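    # Computes the per-pair squared L2 distance: the anchor half (batch rows
    # 0-1 of the (4, 4, 4, 4) input) minus the positive half (batch rows 2-3),
    # squared and summed over the 4 channels into a (2, 4, 4) distance buffer.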
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (144 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (160 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (176 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_relu_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
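    # Fuses the contrastive loss y * d + (1 - y) * relu(margin - d) with
    # margin = 1.0 (in_ptr0 = y, in_ptr1 = the distances), takes the mean
    # over all 128 elements, and halves it to match loss.mean() / 2.0.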
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 32
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp3 - tmp1
tmp6 = tl.full([1, 1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp4 * tmp7
tmp9 = tmp2 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(32)](arg0_1, buf0, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_div_mean_mul_relu_rsub_1[grid(1)](buf2, arg1_1,
buf0, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
contrastive loss
    L2 distance:
        L(a1, a2, y) = y * d(a1, a2) + (1 - y) * max(0, m - d(a1, a2))
    cosine distance:
        L(a1, a2, y) = y * (1 - d(a1, a2)) + (1 - y) * max(0, d(a1, a2) - m)
    where y = 1 if (a1, a2) relevant else 0
"""
def __init__(self, margin=1.0, metric='l2'):
super().__init__()
self.margin = margin
self.metric = metric
metric_list = ['l2', 'cosine']
assert metric in metric_list, 'Error! contrastive metric %s not supported.' % metric
self.metric_id = metric_list.index(metric)
    def extra_repr(self) -> str:
return '?xD -> scalar (Loss)'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| TuBui/deep_image_comparator | ContrastiveLoss | false | 1,153 | [
"MIT"
] | 0 | 2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
contrastive loss
    L2 distance:
        L(a1, a2, y) = y * d(a1, a2) + (1 - y) * max(0, m - d(a1, a2))
    cosine distance:
        L(a1, a2, y) = y * (1 - d(a1, a2)) + (1 - y) * max(0, d(a1, a2) - m)
    where y = 1 if (a1, a2) relevant else 0
"""
def __init__(self, margin=1.0, metric='l2'):
super().__init__()
self.margin = margin
self.metric = metric
metric_list = ['l2', 'cosine']
assert metric in metric_list, 'Error! contrastive metric %s not supported.' % metric
self.metric_id = metric_list.index(metric)
def forward(self, x, y):
a, p = x.chunk(2, dim=0)
if self.metric_id == 0:
dist = torch.sum((a - p) ** 2, dim=1)
loss = y * dist + (1 - y) * F.relu(self.margin - dist)
else:
dist = F.cosine_similarity(a, p)
loss = y * (1 - dist) + (1 - y) * F.relu(dist - self.margin)
return loss.mean() / 2.0
    def extra_repr(self) -> str:
return '?xD -> scalar (Loss)'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return []
|
GreedyHashLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/au/cau5so7arj5gacdef6kj7djfhc7dlz27fly6cilgbqrxabo4ilda.py
# Topologically Sorted Source Nodes: [b, abs_1, sub, pow_1, abs_2, loss], Original ATen: [aten.sign, aten.abs, aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# b => sign
# loss => mean
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 3), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%pow_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_2,), kwargs = {})
triton_per_fused_abs_mean_pow_sign_sub_0 = async_compile.triton('triton_per_fused_abs_mean_pow_sign_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_pow_sign_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_pow_sign_sub_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = tl_math.abs(tmp0)
tmp9 = 1.0
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp11 * tmp10
tmp13 = tl_math.abs(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [b, abs_1, sub, pow_1, abs_2, loss], Original ATen: [aten.sign, aten.abs, aten.sub, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_pow_sign_sub_0.run(buf2, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class GreedyHashLoss(torch.nn.Module):
def __init__(self):
super(GreedyHashLoss, self).__init__()
def forward(self, u):
b = GreedyHashLoss.Hash.apply(u)
loss = (u.abs() - 1).pow(3).abs().mean()
return b, loss
class Hash(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
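
    # Hash is a straight-through estimator: forward quantizes with sign(),
    # backward passes gradients through unchanged, so the |(|u| - 1)**3|
    # penalty in forward() is what drives activations toward the {-1, +1}
    # binary codes.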
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_pow_sign_sub_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel):
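    # Writes sign(u) to out_ptr0 -- computed branchlessly as (0 < u) - (u < 0)
    # -- and reduces mean(|(|u| - 1)**3|) over all 256 elements into the
    # scalar in_out_ptr0.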
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = tl_math.abs(tmp0)
tmp9 = 1.0
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp11 * tmp10
tmp13 = tl_math.abs(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_mean_pow_sign_sub_0[grid(1)](buf2, arg0_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf0, buf2
class GreedyHashLossNew(torch.nn.Module):
def __init__(self):
super(GreedyHashLossNew, self).__init__()
class Hash(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| TuBui/deep_image_comparator | GreedyHashLoss | false | 1,154 | [
"MIT"
] | 0 | 2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, u):
        b = Model.Hash.apply(u)
loss = (u.abs() - 1).pow(3).abs().mean()
return b, loss
class Hash(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xl/cxldlhjpfliyaeswhsohcdhtqevqxjlvece7kkxd6sy4o7gkfgo3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div_1, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf5, buf8, 256, 16, grid=grid(256), stream=stream0)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
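

# SelfAttention computes softmax(Q @ K^T / sqrt(d_k)) @ V; the optional mask
# is applied as a -1e9 additive fill before the softmax so that masked
# positions receive ~zero attention weight.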
class MultiHeadAttention(nn.Module):
def __init__(self, embedding_dim, num_heads, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.embedding_dim = embedding_dim
self.self_attention = SelfAttention(dropout)
self.num_heads = num_heads
self.dim_per_head = embedding_dim // num_heads
self.query_projection = nn.Linear(embedding_dim, embedding_dim)
self.key_projection = nn.Linear(embedding_dim, embedding_dim)
self.value_projection = nn.Linear(embedding_dim, embedding_dim)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(embedding_dim, embedding_dim)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
query = self.query_projection(query)
key = self.key_projection(key)
value = self.value_projection(value)
query = query.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
key = key.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
value = value.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
scores = self.self_attention(query, key, value, mask)
output = scores.transpose(1, 2).contiguous().view(batch_size, -1,
self.embedding_dim)
output = self.out(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_dim': 4, 'num_heads': 4}]
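

if __name__ == "__main__":
    # Minimal shape sketch (illustrative sizes, not from the source repo):
    # embedding_dim must be divisible by num_heads.
    mha = MultiHeadAttention(embedding_dim=8, num_heads=2)
    q = k = v = torch.rand(3, 5, 8)   # (batch, seq, embed)
    out = mha(q, k, v)
    print(out.shape)                  # torch.Size([3, 5, 8])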
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
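    # Fused bias add + layout change: adds the per-feature projection bias
    # (in_ptr1) while permuting the flat (64, 4) projection output into the
    # per-head (batch, head, seq, dim_per_head) attention layout.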
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
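    # Numerically stable softmax over the last dim (16 keys per query row):
    # subtract the per-row max before exp, then normalize by the row sum.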
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
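    # Inverse layout change: gathers the per-head attention output back into
    # a contiguous (batch, seq, embed) tensor ahead of the final projection.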
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK=
32, num_warps=4, num_stages=1)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
out=buf10)
buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_11
return reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super(SelfAttention, self).__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
class MultiHeadAttentionNew(nn.Module):
def __init__(self, embedding_dim, num_heads, dropout=0.1):
super(MultiHeadAttentionNew, self).__init__()
self.embedding_dim = embedding_dim
self.self_attention = SelfAttention(dropout)
self.num_heads = num_heads
self.dim_per_head = embedding_dim // num_heads
self.query_projection = nn.Linear(embedding_dim, embedding_dim)
self.key_projection = nn.Linear(embedding_dim, embedding_dim)
self.value_projection = nn.Linear(embedding_dim, embedding_dim)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(embedding_dim, embedding_dim)
def forward(self, input_0, input_1, input_2):
primals_2 = self.query_projection.weight
primals_3 = self.query_projection.bias
primals_4 = self.key_projection.weight
primals_5 = self.key_projection.bias
primals_7 = self.value_projection.weight
primals_8 = self.value_projection.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| TranQuocTrinh/image_captioning | MultiHeadAttention | false | 1,155 | [
"MIT"
] | 0 | 4c2d77426ba3b9fe9151a15a958320d5298aa190 | https://github.com/TranQuocTrinh/image_captioning/tree/4c2d77426ba3b9fe9151a15a958320d5298aa190 | import torch
import numpy as np
import torch.nn as nn
class SelfAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, dropout=0.1):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
key_dim = key.size(-1)
attn = torch.matmul(query / np.sqrt(key_dim), key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(torch.softmax(attn, dim=-1))
output = torch.matmul(attn, value)
return output
class Model(nn.Module):
def __init__(self, embedding_dim, num_heads, dropout=0.1):
super().__init__()
self.embedding_dim = embedding_dim
self.self_attention = SelfAttention(dropout)
self.num_heads = num_heads
self.dim_per_head = embedding_dim // num_heads
self.query_projection = nn.Linear(embedding_dim, embedding_dim)
self.key_projection = nn.Linear(embedding_dim, embedding_dim)
self.value_projection = nn.Linear(embedding_dim, embedding_dim)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(embedding_dim, embedding_dim)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
query = self.query_projection(query)
key = self.key_projection(key)
value = self.value_projection(value)
query = query.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
key = key.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
value = value.view(batch_size, -1, self.num_heads, self.dim_per_head
).transpose(1, 2)
scores = self.self_attention(query, key, value, mask)
output = scores.transpose(1, 2).contiguous().view(batch_size, -1,
self.embedding_dim)
output = self.out(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LastBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ey/cey4yc74rqvkmtcxtse2vt3dw6pfdi3zwtwezx7cdzkykhz4kzp7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf1, 16, grid=grid(16), stream=stream0)
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlock(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc(x) * self.scale
x = x.view(x.shape[0], x.shape[1], 1, 1)
return self.bn(x).view(x.shape[0], x.shape[1])
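

# With use_wscale=True the scale becomes wscale_gain / sqrt(in_channels), the
# runtime "equalized learning rate" rescaling used in ProgressiveGAN-style
# generators; with the defaults traced here (use_wscale=False) the scale is
# 1.0 and the multiply is a no-op.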
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
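    # In-place multiply by the wscale factor; LastBlock was traced with
    # use_wscale=False, so the factor is the constant 1.0.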
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_1
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlockNew(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Twizwei/idinvert_pytorch | LastBlock | false | 1,156 | [
"MIT"
] | 0 | 11f1126aab517fbe32b488d92f6fdea339463d04 | https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04 | import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class Model(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc(x) * self.scale
x = x.view(x.shape[0], x.shape[1], 1, 1)
return self.bn(x).view(x.shape[0], x.shape[1])
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
SineODE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/bk/cbkiytchil7drgaq6pb5atrw5yi2nhe4ukuncsiopwphvtjq2hra.py
# Topologically Sorted Source Nodes: [mul, truediv, pow_1, mul_1, sin, mul_2, add, pow_2, sub, pow_3, mul_3, add_1], Original ATen: [aten.mul, aten.div, aten.pow, aten.sin, aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sin => sin
# sub => sub
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 2), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %sin), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_3, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul_3), kwargs = {})
triton_poi_fused_add_div_mul_pow_sin_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sin_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sin_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sin_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tmp5 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp3 * tmp1
tmp8 = tl_math.sin(tmp7)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tmp5 * tmp3
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tl.store(out_ptr0 + (x0), tmp15, xmask)
''', device_str='cuda')
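# Illustrative eager-mode restatement (an addition for readability, not part
# of the generated output): with in_ptr0 = y and in_ptr1 = t, the tmp chain
# above computes 2*y/t + t**4*sin(2*t) - t**2 + 4*t**3 elementwise, which is
# exactly SineODE.forward. A CPU reference for spot-checking the fusion:
def _sine_ode_reference(y, t):
    # tmp4 = 2*y/t; tmp9 = t**4 * sin(2*t); tmp11 = tmp4 + tmp9 - t**2;
    # tmp15 = tmp11 + 4*t**3
    return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3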
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, pow_1, mul_1, sin, mul_2, add, pow_2, sub, pow_3, mul_3, add_1], Original ATen: [aten.mul, aten.div, aten.pow, aten.sin, aten.add, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sin_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
class SineODE(torch.nn.Module):
def forward(self, t, y):
return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
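# Minimal usage sketch (an illustrative addition, assuming torchdiffeq is
# installed): integrate dy/dt = SineODE.forward(t, y) from t=1 to t=2 and
# compare the numerical solution against the closed-form y_exact above.
if __name__ == '__main__':
    from torchdiffeq import odeint
    func = SineODE()
    t = torch.linspace(1.0, 2.0, 20)
    y0 = func.y_exact(t[0:1])       # start on the exact trajectory
    y_num = odeint(func, y0, t)     # shape [20, 1]
    err = (y_num.squeeze(-1) - func.y_exact(t)).abs().max()
    print(f'max abs error vs y_exact: {err:.2e}')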
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sin_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tmp5 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp3 * tmp1
tmp8 = tl_math.sin(tmp7)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tmp5 * tmp3
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sin_sub_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SineODENew(torch.nn.Module):
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
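# Illustrative equivalence check (an addition, not part of the generated
# module): on a CUDA machine the fused Triton kernel should match the eager
# expression to float32 rounding. Per the kernel, the first argument is the
# div numerator (y) and the second is the sin/pow operand (t).
if torch.cuda.is_available():
    y = torch.rand(4, 4, 4, 4, device='cuda')
    t = torch.rand(4, 4, 4, 4, device='cuda') + 0.5  # keep t away from zero
    eager = 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
    assert torch.allclose(SineODENew()(y, t), eager, atol=1e-5)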
| TylerChoi1224/torchdiffeq | SineODE | false | 1,157 | [
"MIT"
] | 0 | 72f74d9651a58ab11cdadd60682f1b61e625ef53 | https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53 | import math
import torch
class Model(torch.nn.Module):
def forward(self, t, y):
return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ObjectClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ov/covqeptjvlw6cvbyljoclwhqmwhv5thm2trm3s6dbqmk4is7svay.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, clone, exp, sub, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + (x0), tmp18, xmask)
tl.store(out_ptr1 + (x0), tmp29, xmask)
''', device_str='cuda')
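# Illustrative restatement (an addition, not generated output): this kernel
# is the first pass of a numerically stable log-softmax over the 4 classes --
# out_ptr0 receives max_j(x_j + bias_j) and out_ptr1 receives
# sum_j exp(x_j + bias_j - max). The second kernel below finishes with
# (x + bias) - max - log(sum). In eager PyTorch the two passes amount to:
def _log_softmax_reference(logits, bias):
    z = logits + bias                            # fused bias add from the mm
    m = z.max(dim=1, keepdim=True).values        # pass 1: row max
    s = (z - m).exp().sum(dim=1, keepdim=True)   # pass 1: exp-sum
    return z - m - s.log()                       # pass 2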
# kernel path: runs/run_shard_6/inductor_cache/un/cunzgq3pakpc3fjm7kt4dvxu4tuhtevgp73xalsrx2wb6cl64t3a.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, clone, log, sub, sub_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y1 + (4*x2)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y1 + (4*x2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(out_ptr0 + (x2 + (4*y3)), tmp7, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf0, primals_2, buf1, buf2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del buf0
del buf1
del buf2
del primals_2
return (buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ObjectClassifier(nn.Module):
"""
    Perform log-likelihood over sequence data, i.e. log(softmax), permuting
    dimensions to meet the NLLLoss layout requirement.
Input: [seq_len, bsz, d_input]
Output: [bsz, num_classes, seq_len]
Usage:
bsz=5; seq=16; d_input=1024; num_classes=10
    classifier = ObjectClassifier(d_input, num_classes)
x = torch.rand(seq, bsz, d_input) # 16x5x1024
out = classifier(x) # 5x10x16
"""
def __init__(self, d_input, num_classes):
super(ObjectClassifier, self).__init__()
self.d_input = d_input
self.num_classes = num_classes
self.linear = nn.Linear(d_input, num_classes)
self.classifier = nn.LogSoftmax(dim=1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
out = self.linear(x)
out = out.permute(1, 2, 0)
return self.classifier(out)
def extra_repr(self) ->str:
return 'SxBx%d -> Bx%dxS' % (self.d_input, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_input': 4, 'num_classes': 4}]
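# Minimal usage sketch (an illustrative addition): reproduce the docstring
# example shapes and check the module against a direct F.log_softmax on the
# permuted logits.
if __name__ == '__main__':
    import torch.nn.functional as F
    clf = ObjectClassifier(d_input=1024, num_classes=10)
    x = torch.rand(16, 5, 1024)        # [seq, bsz, d_input]
    out = clf(x)                       # [bsz=5, num_classes=10, seq=16]
    ref = F.log_softmax(clf.linear(x).permute(1, 2, 0), dim=1)
    assert out.shape == (5, 10, 16) and torch.allclose(out, ref)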
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y1 + 4 * x2), xmask & ymask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (y1 + 4 * x2), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(out_ptr0 + (x2 + 4 * y3), tmp7, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 4), (1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(16)](buf0, primals_2, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16, 4)](buf0, primals_2, buf1,
buf2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del primals_2
return buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf3
class ObjectClassifierNew(nn.Module):
"""
    Perform log-likelihood over sequence data, i.e. log(softmax), permuting
    dimensions to meet the NLLLoss layout requirement.
Input: [seq_len, bsz, d_input]
Output: [bsz, num_classes, seq_len]
Usage:
bsz=5; seq=16; d_input=1024; num_classes=10
    classifier = ObjectClassifier(d_input, num_classes)
x = torch.rand(seq, bsz, d_input) # 16x5x1024
out = classifier(x) # 5x10x16
"""
def __init__(self, d_input, num_classes):
super(ObjectClassifierNew, self).__init__()
self.d_input = d_input
self.num_classes = num_classes
self.linear = nn.Linear(d_input, num_classes)
self.classifier = nn.LogSoftmax(dim=1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def extra_repr(self) ->str:
return 'SxBx%d -> Bx%dxS' % (self.d_input, self.num_classes)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
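# Illustrative equivalence check (an addition, not part of the generated
# module): on a CUDA machine the Triton path should agree with eager
# log_softmax over the permuted logits.
if torch.cuda.is_available():
    import torch.nn.functional as F
    m = ObjectClassifierNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    ref = F.log_softmax(m.linear(x).permute(1, 2, 0), dim=1)
    assert torch.allclose(m(x), ref, atol=1e-6)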
| TuBui/deep_image_comparator | ObjectClassifier | false | 1,158 | [
"MIT"
] | 0 | 2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | import torch
from torch import nn
class Model(nn.Module):
"""
    Perform log-likelihood over sequence data, i.e. log(softmax), permuting
    dimensions to meet the NLLLoss layout requirement.
Input: [seq_len, bsz, d_input]
Output: [bsz, num_classes, seq_len]
Usage:
bsz=5; seq=16; d_input=1024; num_classes=10
    classifier = ObjectClassifier(d_input, num_classes)
x = torch.rand(seq, bsz, d_input) # 16x5x1024
out = classifier(x) # 5x10x16
"""
def __init__(self, d_input, num_classes):
super().__init__()
self.d_input = d_input
self.num_classes = num_classes
self.linear = nn.Linear(d_input, num_classes)
self.classifier = nn.LogSoftmax(dim=1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
out = self.linear(x)
out = out.permute(1, 2, 0)
return self.classifier(out)
def extra_repr(self) ->str:
return 'SxBx%d -> Bx%dxS' % (self.d_input, self.num_classes)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
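# Illustrative note on the output layout (an addition): nn.NLLLoss expects
# log-probabilities shaped [batch, classes, d1, ...] with integer targets
# shaped [batch, d1, ...], which is exactly why forward permutes the
# [seq, bsz, classes] logits into [bsz, classes, seq] before the LogSoftmax.
if __name__ == '__main__':
    m = Model(d_input=8, num_classes=3)
    log_probs = m(torch.rand(6, 2, 8))       # [bsz=2, classes=3, seq=6]
    targets = torch.randint(0, 3, (2, 6))    # [bsz, seq]
    loss = nn.NLLLoss()(log_probs, targets)
    print(float(loss))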
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/tp/ctpzszvfnvtd5ojaacosoetsfrtkj6wdllzm3am7qstqgarfm24q.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sl/csliarpndhsybo23yhfn5b2r4tprynajz5cbsvye42aegbnyzw43.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_reflection_pad2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 6
x3 = (xindex // 6)
y4 = yindex
x5 = xindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x2))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x3))))) + (16*y4)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x5) + (4608*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
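# Illustrative decoding of the index arithmetic above (an addition, not
# generated output): for a 4x4 map, the load offset 15 - a - 4*b with
# a = abs(-3 + abs(x2 - 1)) and b = abs(-3 + abs(x3 - 1)) selects source
# element (row 3 - b, col 3 - a), which is nn.ReflectionPad2d(1) for padded
# coordinates in [0, 6). A quick CPU check of the reflection map:
def _reflect_index(x, size=4, pad=1):
    # reflected source coordinate for padded coordinate x
    return (size - 1) - abs((size - 1) - abs(x - pad))
assert [_reflect_index(x) for x in range(6)] == [1, 0, 1, 2, 3, 2]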
# kernel path: runs/run_shard_6/inductor_cache/cb/ccbds54cwkyq7jqwdqg7hx7relsicbkumz4trv6nl3ctaehzsqoy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/eo/ceolj4dwu42mgxponwssjmza5ue5guayvvryjnz2a2vloax4vori.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_2 => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_3 = async_compile.triton('triton_poi_fused_repeat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 128), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ur/curvfbwmi645mrjxmsitrqloj337hb7jgoszjdazdm3fu5onhv7j.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_2 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((128*r1) + (2048*(x0 // 128)) + (x0 % 128)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
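# Illustrative restatement (an addition, not generated output): this
# reduction is the statistics pass of InstanceNorm2d -- a per-(batch,
# channel) mean over the 16 spatial positions plus rsqrt(biased_var + 1e-5),
# consumed by the affine + ReLU kernel that follows.
def _instance_norm_stats(x, eps=1e-05):
    # x: [N, C, H, W] -> per-(N, C) mean and inverse std (biased variance)
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    return mean, (var + eps).rsqrt()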
# kernel path: runs/run_shard_6/inductor_cache/ou/coudlvtbu2mfzkslhudvyg5n42byzvjktxsgnf4o47oahwcsrzpo.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 128
x2 = (xindex // 2048)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (128*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (128*x2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0 + (128*x2)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0 + (128*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jo/cjo24qbf57gf35zx5kkb5xonisd4y2rirqfg5yuv5ouhaf3glzwo.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_4 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_6 = async_compile.triton('triton_poi_fused_reflection_pad2d_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 6
x2 = (xindex // 768) % 6
x3 = (xindex // 4608)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (1920 + x0 + ((-512)*(tl_math.abs((-3) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (2048*x3)), None)
tl.store(out_ptr0 + (x4), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/in/cinjd7t36er3etzukhtxe2t6kdoofg4cde53pcjft626tdsuefeh.py
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.relu, aten.add]
# Source node to ATen node mapping:
# x_7 => relu_1
# x_8 => add_4
# Graph fragment:
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu_1), kwargs = {})
triton_poi_fused_add_relu_7 = async_compile.triton('triton_poi_fused_add_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = (yindex // 16)
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + (128*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (128*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2 + (128*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + (128*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2 + (128*y1)), xmask & ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + (x2 + (128*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp0 + tmp11
tl.store(out_ptr0 + (y0 + (16*x2) + (2048*y1)), tmp12, xmask & ymask)
''', device_str='cuda')
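# Note (an added comment): this epilogue fuses the second branch end-to-end.
# It applies the affine instance norm to the conv output (tmp1..tmp9), a ReLU
# (tmp11), and the residual add with the first branch activation in in_ptr0
# (tmp12), i.e. out = b1 + relu(IN(conv2(pad(b1)))) where
# b1 = relu(IN(conv1(pad(x)))) -- matching ResidualBlock.forward further down.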
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_2, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (128, ), (1, ))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, ), (1, ))
assert_size_stride(primals_9, (128, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_2, buf0, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_6, buf1, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_1.run(primals_1, buf2, 512, 36, grid=grid(512, 36), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 4, 4), (2048, 1, 512, 128))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_3, 8192, grid=grid(8192), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
triton_poi_fused_repeat_3.run(primals_4, buf5, 512, grid=grid(512), stream=stream0)
del primals_4
buf6 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
triton_poi_fused_repeat_3.run(primals_5, buf6, 512, grid=grid(512), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf8 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_4.run(buf10, buf4, buf7, 512, 16, grid=grid(512), stream=stream0)
buf11 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf4, buf7, buf10, buf5, buf6, buf11, 8192, grid=grid(8192), stream=stream0)
buf12 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_6.run(buf11, buf12, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf14, primals_7, 8192, grid=grid(8192), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.repeat]
triton_poi_fused_repeat_3.run(primals_8, buf15, 512, grid=grid(512), stream=stream0)
del primals_8
buf16 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.repeat]
triton_poi_fused_repeat_3.run(primals_9, buf16, 512, grid=grid(512), stream=stream0)
del primals_9
buf17 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf18 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_4.run(buf20, buf14, buf17, 512, 16, grid=grid(512), stream=stream0)
buf21 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.relu, aten.add]
triton_poi_fused_add_relu_7.run(buf11, buf14, buf17, buf20, buf15, buf16, buf21, 64, 128, grid=grid(64, 128), stream=stream0)
del buf11
return (buf21, buf0, buf1, buf2, buf4, buf5, buf6, buf7, buf10, buf12, buf14, buf15, buf16, buf17, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super(ResidualBlock, self).__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, x):
x = self._conv_1(x)
x = x + self._conv_2(x)
return x
def get_inputs():
return [torch.rand([4, 128, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'kernel_size': 4, 'stride': 1}]
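# Minimal usage sketch (an illustrative addition). Note that
# ResidualBlock.__init__ currently ignores its channels/kernel_size/stride
# arguments and hardcodes Conv2d(128, 128, 3, 1) with reflection padding
# (1, 1, 1, 1), so only 128-channel inputs are valid and spatial shape is
# preserved -- which the residual add at the end requires.
if __name__ == '__main__':
    block = ResidualBlock(channels=4, kernel_size=4, stride=1)
    x = torch.rand(4, 128, 4, 4)
    out = block(x)
    assert out.shape == x.shape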
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 6
x3 = xindex // 6
y4 = yindex
x5 = xindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x3)) + 16 * y4),
xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 128 * x5 + 4608 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 * r1 + 2048 * (x0 // 128) + x0 % 128),
xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
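# Fused normalize + affine + ReLU: computes
# relu((x - mean) * rsqrt(var + eps) * weight + bias) elementwise.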
@triton.jit
def triton_poi_fused_relu_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 128
x2 = xindex // 2048
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x0 + 128 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
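# Reflection-pads the first block's activation back to (4, 128, 6, 6) ahead of
# the second convolution.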
@triton.jit
def triton_poi_fused_reflection_pad2d_6(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 6
x2 = xindex // 768 % 6
x3 = xindex // 4608
x4 = xindex
tmp0 = tl.load(in_ptr0 + (1920 + x0 + -512 * tl_math.abs(-3 + tl_math.
abs(-1 + x2)) + -128 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) +
2048 * x3), None)
tl.store(out_ptr0 + x4, tmp0, None)
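# Fuses the second block's normalize + affine + ReLU with the residual add
# x + conv_2(x), writing the result back in the contiguous (2048, 16, 4, 1)
# layout of the original input.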
@triton.jit
def triton_poi_fused_add_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 16
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 128 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 128 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + (x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp0 + tmp11
tl.store(out_ptr0 + (y0 + 16 * x2 + 2048 * y1), tmp12, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_2, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128,), (1,))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_2, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_0[grid(16384, 9)](primals_6, buf1, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_1[grid(512, 36)](primals_1, buf2,
512, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 4, 4), (2048, 1, 512, 128))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(8192)](buf4, primals_3, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_4, buf5, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf6 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_5, buf6, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf8 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf10 = buf8
del buf8
triton_per_fused__native_batch_norm_legit_4[grid(512)](buf10, buf4,
buf7, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_relu_5[grid(8192)](buf4, buf7, buf10, buf5, buf6,
buf11, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_6[grid(18432)](buf11, buf12,
18432, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_2[grid(8192)](buf14, primals_7, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_8, buf15, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_8
buf16 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_9, buf16, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf18 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf20 = buf18
del buf18
triton_per_fused__native_batch_norm_legit_4[grid(512)](buf20, buf14,
buf17, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf21 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused_add_relu_7[grid(64, 128)](buf11, buf14, buf17,
buf20, buf15, buf16, buf21, 64, 128, XBLOCK=4, YBLOCK=64,
num_warps=4, num_stages=1)
del buf11
return (buf21, buf0, buf1, buf2, buf4, buf5, buf6, buf7, buf10, buf12,
buf14, buf15, buf16, buf17, buf20)
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlockNew(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super(ResidualBlockNew, self).__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, input_0):
primals_2 = self._conv_1._layer.weight
primals_3 = self._conv_1._layer.bias
primals_4 = self._conv_1._norm.weight
primals_5 = self._conv_1._norm.bias
primals_6 = self._conv_2._layer.weight
primals_7 = self._conv_2._layer.bias
primals_8 = self._conv_2._norm.weight
primals_9 = self._conv_2._norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| ThomasRanvier/cnn_style_transfer | ResidualBlock | false | 1159 | [
"MIT"
] | 0 | 90b6c76c20263c22f4e45184d572284726ecbd7b | https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b | import torch
import torch.nn as nn
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super().__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class Model(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super().__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, x):
x = self._conv_1(x)
x = x + self._conv_2(x)
return x
def get_inputs():
return [torch.rand([4, 128, 4, 4])]
def get_init_inputs():
return [4, 4, 1]
|
FirstBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ro/crodqpemvzed7dwj6ikboerwmi47yfap7jg5xgrgodxuptzllrip.py
# Topologically Sorted Source Nodes: [mul, leaky_relu], Original ATen: [aten.mul, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# leaky_relu => gt, mul_1, where
# mul => mul
# Graph fragment:
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {})
triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mul, leaky_relu], Original ATen: [aten.mul, aten.leaky_relu, aten.leaky_relu_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlock(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
            raise NotImplementedError(
                f'Activation function not implemented: {activation_type}!')
def forward(self, x):
return self.activate(self.bn(self.conv(x) * self.scale))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
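# Worked check (illustrative): with use_wscale=True and in_channels=4,
# scale = sqrt(2) / sqrt(4 * 3 * 3) = sqrt(2) / 6 ≈ 0.2357. With the default
# use_wscale=False the scale is exactly 1.0, which is why the generated Triton
# kernel multiplies by 1.0 before applying the leaky ReLU.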
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
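# Fused wscale multiply + LeakyReLU(0.2) applied in place to the convolution
# output; also stores the boolean (result > 0) mask consumed by
# leaky_relu_backward.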
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0[grid(256)](buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlockNew(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
            raise NotImplementedError(
                f'Activation function not implemented: {activation_type}!')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
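# Minimal smoke test (an addition; requires CUDA, since `call` allocates CUDA
# buffers and launches Triton kernels):
if __name__ == '__main__' and torch.cuda.is_available():
    block = FirstBlockNew(in_channels=4, out_channels=4).cuda()
    y = block(torch.rand([4, 4, 4, 4], device='cuda'))
    assert y.shape == (4, 4, 4, 4)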
| Twizwei/idinvert_pytorch | FirstBlock | false | 1160 | [
"MIT"
] | 0 | 11f1126aab517fbe32b488d92f6fdea339463d04 | https://github.com/Twizwei/idinvert_pytorch/tree/11f1126aab517fbe32b488d92f6fdea339463d04 | import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class Model(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
            raise NotImplementedError(
                f'Activation function not implemented: {activation_type}!')
def forward(self, x):
return self.activate(self.bn(self.conv(x) * self.scale))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
HR2O_NL | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/bu/cbugpb6sy2krqqw6gofr6xdgw2qe5awzyglopufmaia3yczs4fvt.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
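# The kernel above re-lays out the (512, 512, 3, 3) filters from contiguous
# strides (4608, 9, 3, 1) into channels-last strides (4608, 1, 1536, 512) for
# the extern convolutions.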
# kernel path: runs/run_shard_6/inductor_cache/74/c74puez3sfsj5izoa4nzsrn6rwsxcpwkioksgkrbzorzfs2oledu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
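# The kernel above converts the (4, 512, 64, 64) input from contiguous to
# channels-last strides (2097152, 1, 32768, 512).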
# kernel path: runs/run_shard_6/inductor_cache/ls/clsxanrdjy6vuj2bpvumllvohyhe3tm7hn75d4z5r2g6hejzqqgm.py
# Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
triton_red_fused_mul_sum_2 = async_compile.triton('triton_red_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[65536, 512],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 65536
rnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x2 = (xindex // 16384)
x4 = xindex % 16384
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + (512*x0) + (2097152*x2)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + (512*x4)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask, tmp5, _tmp4)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tl.store(out_ptr0 + (x5), tmp4, None)
''', device_str='cuda')
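# The reduction above computes the attention logits: at every spatial position
# it reduces over the 512 channels to form the dot product between each
# input's query features and each input's key features, yielding a
# (4, 4, 64, 64) score map.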
# kernel path: runs/run_shard_6/inductor_cache/nb/cnbnicltnklwrb3lyiieb74krqfex3ugnyrw57jpumynzoellfo5.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 22.627416997969522), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 16384)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp3 = tl.load(in_ptr0 + (x0 + (16384*x2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4096 + x0 + (16384*x2)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8192 + x0 + (16384*x2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12288 + x0 + (16384*x2)), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.044194173824159216
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
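# Numeric check for the constants above: the logits are scaled by
# 1 / sqrt(512) ≈ 0.044194173824159216 (equivalently, divided by
# sqrt(512) ≈ 22.627416997969522) after subtracting the per-position max,
# i.e. a numerically stable softmax(q . k / sqrt(d)) with d = 512.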
# kernel path: runs/run_shard_6/inductor_cache/ba/cbacxpyh6bhme7f2qneb7gf3damuwonjarfooiwlzn3n565swwzd.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (x2 + (16384*y1)), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4096 + x2 + (16384*y1)), ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8192 + x2 + (16384*y1)), ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12288 + x2 + (16384*y1)), ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (y0 + (4*x2) + (16384*y1)), tmp8, ymask)
''', device_str='cuda')
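# The kernel above finishes the softmax: each exponentiated logit is divided
# by the sum of the four exponentials along the size-4 attention dimension,
# while the result is transposed into the (16384, 1, 256, 4) strided layout
# consumed by the weighted sum below.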
# kernel path: runs/run_shard_6/inductor_cache/6a/c6a3mxzx5j7ad2bj44gu2n2tflqq4umutwce7lnfdlx74u255xbv.py
# Topologically Sorted Source Nodes: [mul_1, virt_feats], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_1 => mul_1
# virt_feats => sum_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %convolution_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused_mul_sum_5 = async_compile.triton('triton_poi_fused_mul_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = (xindex // 512)
x4 = xindex % 2097152
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4*x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x3)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2097152 + x4), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x3)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4194304 + x4), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x3)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (6291456 + x4), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x5), tmp14, None)
''', device_str='cuda')
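# The kernel above forms the attention-weighted sum: out = sum over the four
# attended inputs of att * value, with the size-4 reduction unrolled into
# four multiply-add terms per output element.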
# kernel path: runs/run_shard_6/inductor_cache/oo/coov7m3uyy2j2bksnalgn4jtug226vz6ulyjvw4457ooqarrjlgy.py
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# virt_feats_1 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
triton_per_fused_native_group_norm_6 = async_compile.triton('triton_per_fused_native_group_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[65536, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 65536
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 128
x1 = (xindex // 128) % 128
x2 = (xindex // 16384)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (512*((r3 + (128*x1)) % 4096)) + (2097152*x2) + ((r3 + (128*x1)) // 4096)), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
tl.store(out_ptr2 + (x4), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6d/c6dozl777wqqkkbqxjpberj7tzb3xykuedsvyncdpamfyhbmmaal.py
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# virt_feats_1 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
triton_per_fused_native_group_norm_7 = async_compile.triton('triton_per_fused_native_group_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = (xindex // 128)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (16384*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (128*r2) + (16384*x1)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + (128*r2) + (16384*x1)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + (x3), tmp13, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
tl.store(out_ptr2 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vg/cvg4gd4pleupedkmk7sudp3ojzudfhlxqbdm6touhpkhbpiwgvev.py
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# virt_feats_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused_native_group_norm_8 = async_compile.triton('triton_per_fused_native_group_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (128*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (128*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + (128*x0)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tmp16 = 2097152.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + (x0), tmp20, xmask)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
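# The three reduction kernels above form a staged Welford reduction: the
# first produces per-tile mean/M2/count statistics over 128-element chunks,
# and the next two merge them with triton_helpers.welford until one mean and
# rsqrt(var + 1e-05) remain per sample (2097152 = 512 * 64 * 64 elements
# each).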
# kernel path: runs/run_shard_6/inductor_cache/a3/ca3amw2gaefhv4xzpojtuerdftf6ozu5budywre4cxqypl2l3gbv.py
# Topologically Sorted Source Nodes: [virt_feats_1, virt_feats_2], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# virt_feats_1 => add_1, mul_3
# virt_feats_2 => relu
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_8), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_group_norm_relu_9 = async_compile.triton('triton_poi_fused_native_group_norm_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x2 = (xindex // 2097152)
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2097152.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(in_out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
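# The kernel above applies the per-sample normalization in place:
# (x - mean) * rsqrt(var + eps), scaled by the per-channel weight (in_ptr2)
# and shifted by the bias (in_ptr3), followed by ReLU.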
# kernel path: runs/run_shard_6/inductor_cache/m3/cm3lzwerekxorxerue5hlic7yboixaflorl27immll3gt7g23bmm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %convolution_3), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 512], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = (yindex // 4096)
tmp0 = tl.load(in_ptr0 + (x2 + (512*y3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (512*y3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + (4096*x2) + (2097152*y1)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_2, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_3, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (512, ), (1, ))
assert_size_stride(primals_7, (512, 512, 3, 3), (4608, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_3, buf2, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_4, buf3, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_7, buf4, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 512, 64, 64), (2097152, 1, 32768, 512))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf1, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf7 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum]
triton_red_fused_mul_sum_2.run(buf5, buf6, buf7, 65536, 512, grid=grid(65536), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf8, 65536, grid=grid(65536), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4, 64, 64), (16384, 1, 256, 4), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 16, 4096, grid=grid(16, 4096), stream=stream0)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf1, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf11 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, virt_feats], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_5.run(buf9, buf10, buf11, 8388608, grid=grid(8388608), stream=stream0)
buf12 = reinterpret_tensor(buf8, (4, 1, 1, 1, 128, 128), (16384, 65536, 65536, 65536, 1, 128), 0); del buf8 # reuse
buf13 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536, 65536, 65536, 1, 128), torch.float32)
buf14 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536, 65536, 65536, 1, 128), torch.float32)
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_6.run(buf11, buf12, buf13, buf14, 65536, 128, grid=grid(65536), stream=stream0)
buf15 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512, 1), torch.float32)
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_7.run(buf12, buf13, buf14, buf15, buf16, buf17, 512, 128, grid=grid(512), stream=stream0)
del buf12
del buf13
del buf14
buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [virt_feats_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_8.run(buf15, buf16, buf17, buf18, buf19, buf21, 4, 128, grid=grid(4), stream=stream0)
del buf15
del buf16
del buf17
buf22 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [virt_feats_1, virt_feats_2], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_9.run(buf22, buf18, buf19, primals_5, primals_6, 8388608, grid=grid(8388608), stream=stream0)
del buf19
del primals_6
# Topologically Sorted Source Nodes: [virt_feats_3], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf24 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf1, buf23, buf24, 16384, 512, grid=grid(16384, 512), stream=stream0)
del buf23
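    # buf24 is the module output; the extra tensors returned below are the
    # intermediates inductor keeps alive (typically for the backward pass).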
return (buf24, buf0, buf1, buf2, buf3, primals_5, buf4, buf5, buf6, buf9, buf10, reinterpret_tensor(buf18, (4, 1), (1, 1), 0), reinterpret_tensor(buf21, (4, 1), (1, 1), 0), buf22, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HR2O_NL(nn.Module):
def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
super(HR2O_NL, self).__init__()
self.hidden_dim = hidden_dim
padding = kernel_size // 2
self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv = nn.Conv2d(hidden_dim, hidden_dim, 1 if mlp_1x1 else
kernel_size, padding=0 if mlp_1x1 else padding, bias=False)
self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
self.dp = nn.Dropout(0.2)
def forward(self, x):
query = self.conv_q(x).unsqueeze(1)
key = self.conv_k(x).unsqueeze(0)
att = (query * key).sum(2) / self.hidden_dim ** 0.5
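        # att has shape (N, N, H, W): a scaled dot-product score between every
        # pair of samples in the batch, normalized over the key index (dim 1) below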
att = nn.Softmax(dim=1)(att)
value = self.conv_v(x)
virt_feats = (att.unsqueeze(2) * value).sum(1)
virt_feats = self.norm(virt_feats)
virt_feats = nn.functional.relu(virt_feats)
virt_feats = self.conv(virt_feats)
virt_feats = self.dp(virt_feats)
x = x + virt_feats
return x
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
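# Minimal usage sketch (illustrative only; shapes taken from get_inputs above):
#   m = HR2O_NL()                      # hidden_dim defaults to 512
#   y = m(torch.rand(4, 512, 64, 64))  # residual output, same shape as the input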
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_red_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x2 = xindex // 16384
x4 = xindex % 16384
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + 512 * x0 + 2097152 * x2), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 512 * x4), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask, tmp5, _tmp4)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp3 = tl.load(in_ptr0 + (x0 + 16384 * x2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4096 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8192 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12288 + x0 + 16384 * x2), None,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
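    # 0.044194173824159216 == 512 ** -0.5: the hidden_dim scaling of the
    # attention scores has been folded into the softmax exponent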
tmp15 = 0.044194173824159216
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (x2 + 16384 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4096 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8192 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12288 + x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp8, ymask)
@triton.jit
def triton_poi_fused_mul_sum_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 512
x4 = xindex % 2097152
x5 = xindex
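    # weighted sum over the batch axis (size 4): virt_feats = sum_j att[:, j] * value[j]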
tmp0 = tl.load(in_ptr0 + 4 * x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2097152 + x4), None, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4194304 + x4), None, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (6291456 + x4), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x5, tmp14, None)
@triton.jit
def triton_per_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 128
x1 = xindex // 128 % 128
x2 = xindex // 16384
x4 = xindex
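    # stage 1 of a split group-norm reduction: each program folds 128 of the
    # 2,097,152 group elements into a partial (mean, M2, count) triple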
tmp0 = tl.load(in_ptr0 + (4 * x0 + 512 * ((r3 + 128 * x1) % 4096) +
2097152 * x2 + (r3 + 128 * x1) // 4096), None, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
tl.store(out_ptr2 + x4, tmp7, None)
@triton.jit
def triton_per_fused_native_group_norm_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = xindex // 128
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 128 * r2 + 16384 * x1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
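    # combine the per-block (mean, M2, count) partials with Welford's algorithm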
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_native_group_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 128 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 128 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 2097152.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_9(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x2 = xindex // 2097152
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2097152.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(in_out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_add_10(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + 4096 * x2 + 2097152 * y1), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_2, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_3, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (512,), (1,))
assert_size_stride(primals_7, (512, 512, 3, 3), (4608, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
get_raw_stream(0)
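        # the triton_poi_fused_0 launches repack the 3x3 conv weights into a
        # channels-last layout; triton_poi_fused_1 does the same for the input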
triton_poi_fused_0[grid(262144, 9)](primals_1, buf0, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
triton_poi_fused_1[grid(2048, 4096)](primals_2, buf1, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_3, buf2, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_4, buf3, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_0[grid(262144, 9)](primals_7, buf4, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_7
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf6 = extern_kernels.convolution(buf1, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf7 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_red_fused_mul_sum_2[grid(65536)](buf5, buf6, buf7, 65536,
512, XBLOCK=64, RBLOCK=64, num_warps=16, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused__softmax_3[grid(65536)](buf7, buf8, 65536, XBLOCK=
256, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 64, 64), (16384, 1, 256, 4), 0)
del buf7
triton_poi_fused__softmax_4[grid(16, 4096)](buf8, buf9, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf1, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf11 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768,
512), torch.float32)
triton_poi_fused_mul_sum_5[grid(8388608)](buf9, buf10, buf11,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 1, 1, 1, 128, 128), (16384,
65536, 65536, 65536, 1, 128), 0)
del buf8
buf13 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536,
65536, 65536, 1, 128), torch.float32)
buf14 = empty_strided_cuda((4, 1, 1, 1, 128, 128), (16384, 65536,
65536, 65536, 1, 128), torch.float32)
triton_per_fused_native_group_norm_6[grid(65536)](buf11, buf12,
buf13, buf14, 65536, 128, XBLOCK=32, num_warps=8, num_stages=1)
buf15 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 1, 1, 128), (128, 512, 512, 512,
1), torch.float32)
triton_per_fused_native_group_norm_7[grid(512)](buf12, buf13, buf14,
buf15, buf16, buf17, 512, 128, XBLOCK=1, num_warps=2, num_stages=1)
del buf12
del buf13
del buf14
buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_native_group_norm_8[grid(4)](buf15, buf16, buf17,
buf18, buf19, buf21, 4, 128, XBLOCK=1, num_warps=2, num_stages=1)
del buf15
del buf16
del buf17
buf22 = buf11
del buf11
triton_poi_fused_native_group_norm_relu_9[grid(8388608)](buf22,
buf18, buf19, primals_5, primals_6, 8388608, XBLOCK=512,
num_warps=8, num_stages=1)
del buf19
del primals_6
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf24 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_10[grid(16384, 512)](buf1, buf23, buf24, 16384,
512, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf23
return (buf24, buf0, buf1, buf2, buf3, primals_5, buf4, buf5, buf6,
buf9, buf10, reinterpret_tensor(buf18, (4, 1), (1, 1), 0),
reinterpret_tensor(buf21, (4, 1), (1, 1), 0), buf22)
class HR2O_NLNew(nn.Module):
def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
super(HR2O_NLNew, self).__init__()
self.hidden_dim = hidden_dim
padding = kernel_size // 2
self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv = nn.Conv2d(hidden_dim, hidden_dim, 1 if mlp_1x1 else
kernel_size, padding=0 if mlp_1x1 else padding, bias=False)
self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
self.dp = nn.Dropout(0.2)
def forward(self, input_0):
primals_1 = self.conv_q.weight
primals_3 = self.conv_k.weight
primals_4 = self.conv_v.weight
primals_7 = self.conv.weight
primals_5 = self.norm.weight
primals_6 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| StephenStorm/ACAR | HR2O_NL | false | 1,161 | [
"Apache-2.0"
] | 0 | 21ef3eca7330bd62eccb645018c8e48d9fc52153 | https://github.com/StephenStorm/ACAR/tree/21ef3eca7330bd62eccb645018c8e48d9fc52153 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
super().__init__()
self.hidden_dim = hidden_dim
padding = kernel_size // 2
self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size,
padding=padding, bias=False)
self.conv = nn.Conv2d(hidden_dim, hidden_dim, 1 if mlp_1x1 else
kernel_size, padding=0 if mlp_1x1 else padding, bias=False)
self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
self.dp = nn.Dropout(0.2)
def forward(self, x):
query = self.conv_q(x).unsqueeze(1)
key = self.conv_k(x).unsqueeze(0)
att = (query * key).sum(2) / self.hidden_dim ** 0.5
att = nn.Softmax(dim=1)(att)
value = self.conv_v(x)
virt_feats = (att.unsqueeze(2) * value).sum(1)
virt_feats = self.norm(virt_feats)
virt_feats = nn.functional.relu(virt_feats)
virt_feats = self.conv(virt_feats)
virt_feats = self.dp(virt_feats)
x = x + virt_feats
return x
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return []
|
Gated_Recurrent_Unit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yo/cyof62umdskzkj4eethr6hq4fh4mcsng3u7pc7rf73hxtpeptbh4.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
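    # fuse both linear biases and the final add:
    # out = (relu(x) @ Wih.T + b_ih) + (relu(h) @ Whh.T + b_hh)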
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(primals_4, buf2, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf4, primals_3, buf3, primals_6, 256, grid=grid(256), stream=stream0)
del buf3
del primals_3
del primals_6
return (buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Gated_Recurrent_Unit(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_Unit, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input, hidden):
output = self.wih(F.relu(input)) + self.whh(F.relu(hidden))
if self.dropout:
output = F.dropout(output, training=self.training)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fea_size': 4, 'dropout': 0.5}]
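# Illustrative call (sizes from get_init_inputs/get_inputs above):
#   gru = Gated_Recurrent_Unit(fea_size=4, dropout=0.5)
#   out = gru(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))  # same shape as inputs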
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](primals_4, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf4, primals_3, buf3, primals_6,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_3
del primals_6
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
class Gated_Recurrent_UnitNew(nn.Module):
def __init__(self, fea_size, dropout):
super(Gated_Recurrent_UnitNew, self).__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_2 = self.wih.weight
primals_3 = self.wih.bias
primals_5 = self.whh.weight
primals_6 = self.whh.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| SpartaG117/scene_graph_benchmark | Gated_Recurrent_Unit | false | 1,162 | [
"MIT"
] | 0 | e2e49940dd2f752b1faf9ae26707435ba3441bcb | https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, fea_size, dropout):
super().__init__()
self.wih = nn.Linear(fea_size, fea_size, bias=True)
self.whh = nn.Linear(fea_size, fea_size, bias=True)
self.dropout = dropout
def forward(self, input, hidden):
output = self.wih(F.relu(input)) + self.whh(F.relu(hidden))
if self.dropout:
output = F.dropout(output, training=self.training)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 0.5]
|
ConstantODE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lz/clzgdfddrozy5odymngj4cdkrvkdttixpcmxu2nsxxlqvfkk3aed.py
# Topologically Sorted Source Nodes: [mul, add, sub, pow_1, add_1], Original ATen: [aten.mul, aten.add, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_4, %add), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %pow_1), kwargs = {})
triton_poi_fused_add_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp5 = tl.load(in_ptr3 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp7 = tmp4 + tmp6
tmp8 = tmp2 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 * tmp8
tmp12 = tmp1 + tmp11
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, sub, pow_1, add_1], Original ATen: [aten.mul, aten.add, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0.run(primals_1, primals_4, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ConstantODE(torch.nn.Module):
def __init__(self):
super(ConstantODE, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b)) ** 5
def y_exact(self, t):
return self.a * t + self.b
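# Note: y_exact really solves this ODE: with y = a*t + b the quintic term
# vanishes, so forward(t, y) returns a, which is d/dt (a*t + b).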
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp7 = tmp4 + tmp6
tmp8 = tmp2 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 * tmp8
tmp12 = tmp1 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0[grid(256)](primals_1, primals_4,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ConstantODENew(torch.nn.Module):
def __init__(self):
super(ConstantODENew, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def y_exact(self, t):
return self.a * t + self.b
def forward(self, input_0, input_1):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| TylerChoi1224/torchdiffeq | ConstantODE | false | 1,163 | [
"MIT"
] | 0 | 72f74d9651a58ab11cdadd60682f1b61e625ef53 | https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b)) ** 5
def y_exact(self, t):
return self.a * t + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GradientReversal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/u5/cu56dhpcth43gy4shrd7mcexf4nfa6qetnnhwe4mno4v6ug76h6j.py
# Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# clone => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
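    # GradientReversal's forward is the identity, so this kernel is a plain
    # elementwise copy; the sign flip lives entirely in the backward pass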
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
    The forward pass is the identity function. In the backward pass, the
    upstream gradients are multiplied by -lambda (i.e., the gradient is reversed).
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
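# Quick gradient check (illustrative):
#   x = torch.ones(3, requires_grad=True)
#   GradientReversal(lambda_=2.0)(x).sum().backward()
#   assert torch.allclose(x.grad, torch.full((3,), -2.0))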
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
    The forward pass is the identity function. In the backward pass, the
    upstream gradients are multiplied by -lambda (i.e., the gradient is reversed).
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversalNew(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversalNew, self).__init__()
self.lambda_ = lambda_
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TheElderMindseeker/pytorch-domain-adaptation | GradientReversal | false | 1,164 | [
"MIT"
] | 0 | 70ca862708bd6e59b5eee5d7c8bd808ef3457dc8 | https://github.com/TheElderMindseeker/pytorch-domain-adaptation/tree/70ca862708bd6e59b5eee5d7c8bd808ef3457dc8 | from torch.autograd import Function
import torch
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class Model(torch.nn.Module):
def __init__(self, lambda_=1):
super().__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/tf/ctfrmkdzcncuvqa3lx5gfprtbhmrpnkdxzqqmfnra2srkxlmy2kn.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/re/creookpdgr2hf34ub6gmeaguf32rxk6p3rlylk2rt2cuu4sg2o5z.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# out_2 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 20]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = (xindex // 20)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (20*x1) + (80*((x1 % 4) // 4)) + (320*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 20), (20, 1))
assert_size_stride(primals_5, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 1280, grid=grid(1280), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 1280, grid=grid(1280), stream=stream0)
del buf1
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (20, 2), (1, 20), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(Decoder, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
return out
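# Illustrative sketch (not part of the original record; helper name is
# hypothetical): nn.Linear acts on the last dimension, so the decoder maps
# (..., latent_dim) to (..., obs_dim) and keeps all leading dimensions.
def _check_decoder_shapes():
    dec = Decoder(latent_dim=4, obs_dim=2, nhidden=20)
    out = dec(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 2)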
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 80 * (x1 % 4 // 4) + 320 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 20), (20, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1,
primals_2, buf4, 1280, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
triton_poi_fused_view_1[grid(1280)](buf1, buf2, 1280, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(20, 2), (1, 20), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class DecoderNew(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(DecoderNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| TylerChoi1224/torchdiffeq | Decoder | false | 1,165 | [
"MIT"
] | 0 | 72f74d9651a58ab11cdadd60682f1b61e625ef53 | https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
WeighedMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3x/c3xujuw6s3ums2q52c2zz67wpzpwemh2jfeivhwqaaxdqvqxg2ya.py
# Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.mse_loss, aten.mul, aten.mean]
# Source node to ATen node mapping:
# loss => pow_1, sub
# mean => mean
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {})
triton_per_fused_mean_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mean_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.mse_loss, aten.mul, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch.nn import MSELoss
class WeighedMSELoss(MSELoss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
    def forward(self, input: 'Tensor', target: 'Tensor') -> Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
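# Illustrative sketch (not part of the original record; helper name is
# hypothetical): with reduction='none' the parent MSELoss returns the
# elementwise squared error, so the forward above equals
# torch.mean(weights * (input - target) ** 2).
def _check_weighed_mse():
    crit = WeighedMSELoss(weights=4)
    a, b = torch.rand(4, 4), torch.rand(4, 4)
    assert torch.allclose(crit(a, b), (4 * (a - b) ** 2).mean())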
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import MSELoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeighedMSELossNew(MSELoss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| UT-ADL/lidar-as-camera | WeighedMSELoss | false | 1,166 | [
"Apache-2.0"
] | 0 | daccb2ae21b4899ecfd8611b7a27f91681617383 | https://github.com/UT-ADL/lidar-as-camera/tree/daccb2ae21b4899ecfd8611b7a27f91681617383 | import torch
from torch import Tensor
from torch.nn import MSELoss
class Model(MSELoss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
    def forward(self, input: 'Tensor', target: 'Tensor') -> Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LanguageModelCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/y6/cy6c63inntzv3gk7tttvtntl47lyazeloek7emnkrhkhkqbx5kci.py
# Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# neg => neg
# output => mul
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg2_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].float()
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
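# Illustrative sketch (not part of the original record; helper name is
# hypothetical): `input` is assumed to hold per-token log-probabilities of
# shape (N, T, V); the criterion is then a masked negative log-likelihood
# averaged over the unmasked positions.
def _check_lm_criterion():
    crit = LanguageModelCriterion()
    logprobs = torch.log_softmax(torch.randn(2, 3, 5), dim=-1)
    target = torch.randint(0, 5, (2, 3))
    mask = torch.ones(2, 3)
    # with an all-ones mask the loss reduces to the plain mean NLL
    expected = -logprobs.gather(2, target.unsqueeze(2)).squeeze(2).mean()
    assert torch.allclose(crit(logprobs, target, mask), expected)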
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg0_1, arg1_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg1_1 = input_0
arg0_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| VISLANG-Lab/MGCL | LanguageModelCriterion | false | 1,167 | [
"MIT"
] | 0 | 22da06ffa7410d9632bfda8eefb1b79e4f660de0 | https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0 | import torch
import torch.nn as nn
from torch.autograd import *
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].float()
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return []
|
PoolFormerBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dr/cdrbbq25gaacwdmuqqn76ytppvbzlwbqwo7aazovogwtjetsi3kf.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add, add_1, mul_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_7), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_4), kwargs = {})
triton_per_fused_native_group_norm_0 = async_compile.triton('triton_per_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + (64*x0)), tmp27, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ou/coupyg345piz42etl5al6lwpef5kkccwwnxo3krwbluktyaryaxz.py
# Topologically Sorted Source Nodes: [avg_pool2d, group_norm_1], Original ATen: [aten.avg_pool2d, aten.native_group_norm]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# group_norm_1 => add_3, add_4, mul_4, rsqrt_1, var_mean_1
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%add_1, [3, 3], [1, 1], [1, 1], False, False), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_15), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %unsqueeze_12), kwargs = {})
triton_per_fused_avg_pool2d_native_group_norm_1 = async_compile.triton('triton_per_fused_avg_pool2d_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_avg_pool2d_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = (rindex // 4) % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp54 = tl.load(in_ptr1 + (r6 + (64*x0)), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + (64*x0)), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + (r3), None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + (r3), None, eviction_policy='evict_last')
tmp0 = (-1) + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + r6 + (64*x0)), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + r6 + (64*x0)), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + r6 + (64*x0)), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + r6 + (64*x0)), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + (64*x0)), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + (64*x0)), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + (64*x0)), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + (64*x0)), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + (64*x0)), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (((0) * ((0) >= ((-1) + r1)) + ((-1) + r1) * (((-1) + r1) > (0)))*((0) * ((0) >= ((-1) + r2)) + ((-1) + r2) * (((-1) + r2) > (0)))) + (((4) * ((4) <= (2 + r1)) + (2 + r1) * ((2 + r1) < (4)))*((4) * ((4) <= (2 + r2)) + (2 + r2) * ((2 + r2) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + r1)) + ((-1) + r1) * (((-1) + r1) > (0)))*((4) * ((4) <= (2 + r2)) + (2 + r2) * ((2 + r2) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + r2)) + ((-1) + r2) * (((-1) + r2) > (0)))*((4) * ((4) <= (2 + r1)) + (2 + r1) * ((2 + r1) < (4))))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + (64*x0)), tmp53, xmask)
tl.store(out_ptr3 + (r6 + (64*x0)), tmp86, xmask)
tl.store(out_ptr4 + (x0), tmp81, xmask)
tl.store(out_ptr1 + (x0), tmp69, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gd/cgdslzbvz4m7vmsmypc7vpzz4mhpkezpihgu3s34qnd5bo5vg7h5.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => add_5, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_5), kwargs = {})
triton_poi_fused_convolution_gelu_2 = async_compile.triton('triton_poi_fused_convolution_gelu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2s/c2sr6yssngw5ceeda7lntygiwg4bblf5mrmsznbnzvcgr3mzmwrc.py
# Topologically Sorted Source Nodes: [sub, mul, x, x_4, mul_1, x_6], Original ATen: [aten.sub, aten.mul, aten.add, aten.convolution]
# Source node to ATen node mapping:
# mul => mul_2
# mul_1 => mul_8
# sub => sub_1
# x => add_2
# x_4 => convolution_1
# x_6 => add_6
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d, %add_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %sub_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %mul_2), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_7, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_9, %convolution_1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_8), kwargs = {})
triton_poi_fused_add_convolution_mul_sub_3 = async_compile.triton('triton_poi_fused_add_convolution_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x3), xmask)
tmp6 = tl.load(in_ptr4 + (x3), xmask)
tmp10 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_0.run(primals_4, primals_2, primals_3, buf0, buf3, buf16, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d, group_norm_1], Original ATen: [aten.avg_pool2d, aten.native_group_norm]
triton_per_fused_avg_pool2d_native_group_norm_1.run(buf3, primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8, buf9, 4, 64, grid=grid(4), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.gelu]
triton_poi_fused_convolution_gelu_2.run(buf11, primals_9, buf12, 1024, grid=grid(1024), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, x, x_4, mul_1, x_6], Original ATen: [aten.sub, aten.mul, aten.add, aten.convolution]
triton_poi_fused_add_convolution_mul_sub_3.run(buf14, primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15, 256, grid=grid(256), stream=stream0)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8, primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1, 1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12, buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import warnings
import torch.nn as nn
# DropPath (stochastic depth) is used by PoolFormerBlock below when
# drop_path > 0 but was not imported in the original snippet; assuming the
# timm implementation here.
from timm.models.layers import DropPath
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
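def _demo_trunc_normal():
    # Minimal sketch (not part of the original module): draw truncated-normal
    # weights; a and b default to the absolute clamp bounds [-2.0, 2.0].
    w = torch.empty(16, 4)
    trunc_normal_(w, std=0.02)
    assert float(w.min()) >= -2.0 and float(w.max()) <= 2.0
    return w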
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
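def _demo_pooling():
    # Minimal sketch (not part of the original module): the token mixer is
    # avg_pool(x) - x, so a spatially constant input yields an all-zero output
    # (count_include_pad=False keeps the border averages unbiased).
    pool = Pooling(pool_size=3)
    x = torch.ones(1, 4, 8, 8)
    y = pool(x)
    assert torch.allclose(y, torch.zeros_like(y))
    return y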
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlock(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
    --drop_path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
        # Note: DropPath is not defined in this listing; the original code imports
        # it (e.g. from timm), and this branch is only taken when drop_path > 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, x):
if self.use_layer_scale:
x = x + self.drop_path(self.layer_scale_1.unsqueeze(-1).
unsqueeze(-1) * self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.layer_scale_2.unsqueeze(-1).
unsqueeze(-1) * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
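def _demo_poolformer_block():
    # Minimal sketch (not part of the original module): one block maps
    # [B, C, H, W] -> [B, C, H, W]; with the default drop_path=0.0 the
    # undefined DropPath branch is never taken.
    block = PoolFormerBlock(dim=4)
    x = torch.rand(4, 4, 4, 4)
    assert block(x).shape == x.shape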
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
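# Note (added for readability, not part of the generated output): this kernel
# fuses GroupNorm with a single group over each sample of the (4, 4, 4, 4)
# input. Each program id reduces 64 elements to a mean and variance, then
# writes the normalized, affine-transformed result plus the per-sample mean
# and rstd (kept for the backward pass).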
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
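# Note (added for readability, not part of the generated output): this kernel
# fuses the 3x3 stride-1 average pool (the divisor expression below counts only
# in-bounds taps, matching count_include_pad=False), the layer-scale residual
# x + scale_1 * (pool(norm1(x)) - norm1(x)), and the second GroupNorm in a
# single reduction pass.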
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 4 % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = rindex // 16
tmp54 = tl.load(in_ptr1 + (r6 + 64 * x0), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + 64 * x0), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + r3, None, eviction_policy='evict_last')
tmp0 = -1 + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + r6 + 64 * x0), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + r6 + 64 * x0), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + r6 + 64 * x0), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + r6 + 64 * x0), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + 64 * x0), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + 64 * x0), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + 64 * x0), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + 64 * x0), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + 64 * x0), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (0 * (0 >= -
1 + r2) + (-1 + r2) * (-1 + r2 > 0)) + (4 * (4 <= 2 + r1) + (2 + r1
) * (2 + r1 < 4)) * (4 * (4 <= 2 + r2) + (2 + r2) * (2 + r2 < 4)
) + -1 * (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (4 * (4 <=
2 + r2) + (2 + r2) * (2 + r2 < 4)) + -1 * (0 * (0 >= -1 + r2) + (-1 +
r2) * (-1 + r2 > 0)) * (4 * (4 <= 2 + r1) + (2 + r1) * (2 + r1 < 4))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + 64 * x0), tmp53, xmask)
tl.store(out_ptr3 + (r6 + 64 * x0), tmp86, xmask)
tl.store(out_ptr4 + x0, tmp81, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
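# Note (added for readability, not part of the generated output): this kernel
# fuses the fc1 bias add with an exact (erf-based) GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))); 0.7071067811865476 is 1/sqrt(2).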
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
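# Note (added for readability, not part of the generated output): this kernel
# fuses the fc2 bias add with both residual branches of the block:
# out = (x + scale_1 * (pool - norm1_out)) + scale_2 * mlp_out.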
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp6 = tl.load(in_ptr4 + x3, xmask)
tmp10 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(4)](primals_4, primals_2,
primals_3, buf0, buf3, buf16, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_avg_pool2d_native_group_norm_1[grid(4)](buf3,
primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8,
buf9, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_gelu_2[grid(1024)](buf11, primals_9,
buf12, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_sub_3[grid(256)](buf14,
primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8,
primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1,
1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12,
buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0),
reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0))
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlockNew(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
    --drop_path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
        # Note: DropPath is not defined in this listing; the original code imports
        # it (e.g. from timm), and this branch is only taken when drop_path > 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, input_0):
primals_1 = self.layer_scale_1
primals_2 = self.layer_scale_2
primals_3 = self.norm1.weight
primals_5 = self.norm1.bias
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_8 = self.mlp.fc1.weight
primals_9 = self.mlp.fc1.bias
primals_10 = self.mlp.fc2.weight
primals_11 = self.mlp.fc2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| TranNhiem/solo-learn | PoolFormerBlock | false | 1,168 | [
"MIT"
] | 0 | 7539732b68d153087d09a26a23e1edfdc49bc086 | https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086 | import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Model(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
    --drop_path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
        # Note: DropPath is not defined in this listing; the original code imports
        # it (e.g. from timm), and this branch is only taken when drop_path > 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale
# ... truncated (>4000 chars) for memory efficiency |
SelfAttn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [ui], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# ui => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gj/cgjeqalj3tj2k5zxudyuzg5uvy3eelkmu2pzeffdkeicr4q7pgpz.py
# Topologically Sorted Source Nodes: [ai], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# ai => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/f5/cf5347orlkm5ylmh4iouw6qvowwebewwg66dnqrhzt7tg7a4irp5.py
# Topologically Sorted Source Nodes: [ai], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# ai => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hl/chlurvl25cmjpuzhx4f3avcjm5jhixwhpb3wd6dosusoswupaqcq.py
# Topologically Sorted Source Nodes: [mul, o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# o => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [ui], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [ai], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [ai], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [mul, o], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(primals_3, buf4, buf5, 64, grid=grid(64), stream=stream0)
return (buf5, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_3, buf1, buf4, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class SelfAttn(nn.Module):
"""
    Self attention layer: aggregating a sequence into a single vector.
    This implementation uses the attention formula proposed by Sukhbaatar et al. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Usage:
seq_len=10; bsz=16; in_dim=128
    attn = SelfAttn(in_dim)
x = torch.rand(seq_len, bsz, in_dim) # 10x16x128
y, a = attn(x) # output y 16x128, attention weight a 10x16
"""
def __init__(self, d_input, units=None):
"""
:param d_input: input feature dimension
:param units: dimension of internal projection, if None it will be set to d_input
"""
super(SelfAttn, self).__init__()
self.d_input = d_input
self.units = units if units else d_input
self.projection = nn.Linear(self.d_input, self.units)
self.V = nn.Parameter(torch.Tensor(self.units, 1))
self.init_weights()
def init_weights(self):
initrange = 0.1
self.projection.bias.data.zero_()
self.projection.weight.data.uniform_(-initrange, initrange)
self.V.data.uniform_(-initrange, initrange)
def forward(self, x, mask=None):
"""
ui = tanh(xW+b)
a = softmax(uV)
o = sum(a*x)
:param x: input tensor [seq_len, bsz, feat_dim]
:return: output tensor [bsz, feat_dim]
"""
ui = torch.tanh(self.projection(x))
ai = F.softmax(torch.matmul(ui, self.V), dim=0)
if mask is not None:
ai = ai * mask.unsqueeze(-1)
ai = ai / ai.sum(dim=0, keepdim=True)
o = torch.sum(x * ai, dim=0)
return o, ai.squeeze(-1)
def extra_repr(self):
return 'Sx?x%d -> ?x%d' % (self.d_input, self.d_input)
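def _demo_self_attn():
    # Minimal sketch (not part of the original module): attention weights are
    # a softmax over the sequence dimension (dim 0), so they sum to one.
    attn = SelfAttn(d_input=4)
    x = torch.rand(4, 4, 4, 4)
    o, a = attn(x)
    assert o.shape == (4, 4, 4) and a.shape == (4, 4, 4)
    assert torch.allclose(a.sum(dim=0), torch.ones_like(a.sum(dim=0)))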
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_input': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
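# Note (added for readability, not part of the generated output): the next two
# kernels implement a numerically stable softmax over the sequence dimension
# (dim 0, length 4): the first subtracts the running max and exponentiates,
# the second divides by the sum of the exponentials.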
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
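# Note (added for readability, not part of the generated output): this kernel
# computes o = sum_over_seq(x * a) by unrolling the four sequence positions
# (offsets 0, 64, 128, 192 in x and 0, 16, 32, 48 in the attention weights).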
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_mul_sum_3[grid(64)](primals_3, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf5, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), primals_3, buf1, buf4, reinterpret_tensor(primals_4, (1, 4), (1,
1), 0)
class SelfAttnNew(nn.Module):
"""
    Self attention layer: aggregating a sequence into a single vector.
    This implementation uses the attention formula proposed by Sukhbaatar et al. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Usage:
seq_len=10; bsz=16; in_dim=128
    attn = SelfAttn(in_dim)
x = torch.rand(seq_len, bsz, in_dim) # 10x16x128
y, a = attn(x) # output y 16x128, attention weight a 10x16
"""
def __init__(self, d_input, units=None):
"""
:param d_input: input feature dimension
:param units: dimension of internal projection, if None it will be set to d_input
"""
super(SelfAttnNew, self).__init__()
self.d_input = d_input
self.units = units if units else d_input
self.projection = nn.Linear(self.d_input, self.units)
self.V = nn.Parameter(torch.Tensor(self.units, 1))
self.init_weights()
def init_weights(self):
initrange = 0.1
self.projection.bias.data.zero_()
self.projection.weight.data.uniform_(-initrange, initrange)
self.V.data.uniform_(-initrange, initrange)
def extra_repr(self):
return 'Sx?x%d -> ?x%d' % (self.d_input, self.d_input)
def forward(self, input_0):
primals_4 = self.V
primals_1 = self.projection.weight
primals_2 = self.projection.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
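def _demo_compiled_vs_eager():
    # Minimal sketch (not part of the original module; requires CUDA and assumes
    # the eager SelfAttn defined earlier in this file is in scope): the compiled
    # module should reproduce the eager outputs when parameters are shared.
    eager = SelfAttn(d_input=4).cuda()
    compiled = SelfAttnNew(d_input=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    o_ref, a_ref = eager(x)
    o_new, a_new = compiled(x)
    assert torch.allclose(o_ref, o_new, atol=1e-5)
    assert torch.allclose(a_ref, a_new, atol=1e-5)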
| TuBui/deep_image_comparator | SelfAttn | false | 1,169 | [
"MIT"
] | 0 | 2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | https://github.com/TuBui/deep_image_comparator/tree/2dea7738d794b91a960ee9f41461a4e3ffcd5e44 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Self attention layer: aggregating a sequence into a single vector.
    This implementation uses the attention formula proposed by Sukhbaatar et al. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Usage:
seq_len=10; bsz=16; in_dim=128
    attn = SelfAttn(in_dim)
x = torch.rand(seq_len, bsz, in_dim) # 10x16x128
y, a = attn(x) # output y 16x128, attention weight a 10x16
"""
def __init__(self, d_input, units=None):
"""
:param d_input: input feature dimension
:param units: dimension of internal projection, if None it will be set to d_input
"""
super().__init__()
self.d_input = d_input
self.units = units if units else d_input
self.projection = nn.Linear(self.d_input, self.units)
self.V = nn.Parameter(torch.Tensor(self.units, 1))
self.init_weights()
def init_weights(self):
initrange = 0.1
self.projection.bias.data.zero_()
self.projection.weight.data.uniform_(-initrange, initrange)
self.V.data.uniform_(-initrange, initrange)
def forward(self, x, mask=None):
"""
ui = tanh(xW+b)
a = softmax(uV)
o = sum(a*x)
:param x: input tensor [seq_len, bsz, feat_dim]
:return: output tensor [bsz, feat_dim]
"""
ui = torch.tanh(self.projection(x))
ai = F.softmax(torch.matmul(ui, self.V), dim=0)
if mask is not None:
ai = ai * mask.unsqueeze(-1)
ai = ai / ai.sum(dim=0, keepdim=True)
o = torch.sum(x * ai, dim=0)
return o, ai.squeeze(-1)
def extra_repr(self):
return 'Sx?x%d -> ?x%d' % (self.d_input, self.d_input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SinkhornKnopp | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
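# Note (added for readability, not part of the generated output): the kernels
# below carry out Sinkhorn-Knopp normalization of Q = exp(logits * 20.0) for a
# small 4x4 problem, with the alternating row/column rescalings fully unrolled;
# the 20.0 factor is presumably 1/epsilon for epsilon = 0.05 in the source module.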
# kernel path: runs/run_shard_6/inductor_cache/3g/c3gd5ipjfbufzu6hvujxufg6z3emufd62cvcqjyy3muqew2xvzbd.py
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pb/cpb5rsib7fg2llnv7dg2enzvdhrqqrlufmxvryadr6zckhtmyysm.py
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_2, [1], True), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ay/cayipr5tzejzfvzl7ba3w7cqhf2axor2ag2cqam22ambv3zsqfqn.py
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_3 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_6, [0], True), kwargs = {})
triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gf/cgfazley3l5rdd6ypj2skdwbxoltt6cdo3d3mpt4z3kqywdlsil5.py
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_1 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_10, [1], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/im/cimyq4ncn2lt7wfmnkrejd4t7hctagm26a6wsjpob7y2e6gdebuy.py
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_7 => div_7
# Graph fragment:
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_12, 4), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/f4/cf4q6mh4hv4abtcbirw4vkce7au36jgqitbjdciddckwumhdqyiq.py
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_9 => div_9
# Graph fragment:
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_16, 4), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
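# `triton_poi_fused_div_5` performs one rescaling step of the Sinkhorn loop on
# the (1, 4)-strided buffer: each element is divided by the sum of its
# contiguous group of four, then scaled by the folded constant 0.25 (here
# 1/K = 1/B, since K and B are both 4 for this input).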
# kernel path: runs/run_shard_6/inductor_cache/zl/czlhaiakbynkvvwkmvsahwmxzw7xdccr5ho7bxsxvn4m725ewzpe.py
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_11 => div_11
# Graph fragment:
# %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_20, 4), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
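# `triton_poi_fused_div_6` is the complementary rescaling step along the
# stride-4 axis of the same buffer, again with the constant 0.25 folded in.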
# kernel path: runs/run_shard_6/inductor_cache/36/c36xk4rs42yzbwetmlob65txyg64pwcsheobuwb6xw3pwyopqghz.py
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# Q_14 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_26, 4), kwargs = {})
triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + (4*y0)), tmp12, xmask & ymask)
''', device_str='cuda')
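# `triton_poi_fused_mul_7` fuses the loop's final rescaling with the trailing
# `Q /= B; Q *= B` (the 0.25 and 4.0 factors appear back-to-back and cancel)
# and stores the result transposed (reads y0 + 4*x1, writes x1 + 4*y0),
# working toward the final `Q.t()`.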
# kernel path: runs/run_shard_6/inductor_cache/dt/cdt2v5rbgu26tdvjkb5jbjk3tqziqwvnlr3boatkbhu2j3hsthua.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %permute_31 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%permute_30, [1, 0]), kwargs = {})
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
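# `triton_poi_fused_8` is a pure transpose copy; it materializes the remaining
# permute so that `call` can return a contiguous (4, 1)-strided tensor.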
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
triton_poi_fused_sum_1.run(arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
triton_poi_fused_sum_2.run(arg0_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
triton_poi_fused_div_4.run(arg0_1, buf0, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf6, buf7, 4, 4, grid=grid(4, 4), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
del buf7
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed as dist
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approximate a doubly stochastic matrix is to alternately
        rescale the rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
columns in an alternating fashion for num_iter times. Before returning it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
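# A minimal usage sketch (illustrative addition, not part of the original
# module; it assumes a single, non-distributed process). After the final
# column normalization, every column of Q sums to 1, so each row of the
# returned (B, K) assignment matrix should sum to roughly 1.
if __name__ == "__main__":
    sk = SinkhornKnopp(num_iters=3, epsilon=0.05)
    assignments = sk(torch.rand(4, 4))
    print(assignments.sum(dim=1))  # expected: values close to 1.0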
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_7[grid(4, 4)](buf6, buf7, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_8[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf7
return buf8,
class SinkhornKnoppNew(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approximate a doubly stochastic matrix is to alternately
        rescale the rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TranNhiem/solo-learn | SinkhornKnopp | false | 1,170 | [
"MIT"
] | 0 | 7539732b68d153087d09a26a23e1edfdc49bc086 | https://github.com/TranNhiem/solo-learn/tree/7539732b68d153087d09a26a23e1edfdc49bc086 | import torch
import torch.distributed as dist
class Model(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approximate a doubly stochastic matrix is to alternately
        rescale the rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
columns in an alternating fashion for num_iter times. Before returning it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
RewardCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/th/cthi7dvxte3xeavjjqrz6omclqig3gqujobvt5ooicycppv45vw4.py
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mul => mul
# neg => neg
# output => mul_1
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp13 = tmp11 >= tmp12
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = 1.0
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tmp20 = tl.full([1, 1], 4, tl.int64)
tmp21 = tmp11 < tmp20
tmp22 = tl.load(in_ptr0 + (tl.broadcast_to((4*(r0 // 4)) + ((-1) + (r0 % 4)), [XBLOCK, RBLOCK])), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tmp23.to(tl.float32)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp15, tmp18, tmp26)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = tmp31 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None)
''', device_str='cuda')
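# The single persistent-reduction kernel above computes the whole loss: it
# gathers the log-probabilities at the sampled token ids (with negative-index
# wraparound plus a bounds assert), rebuilds the shifted validity mask inline
# from `seq > 0`, and reduces sum(-logp * reward * mask) / sum(mask) into one
# scalar.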
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq > 0).float()
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1
).reshape(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
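# A small illustration of the mask construction (assumed example, not from the
# repo): position 0 of every sequence is always kept, and position t is kept
# exactly when the previous token seq[:, t-1] is non-padding (> 0), so the loss
# covers each token up to and including the first padding/EOS position.
if __name__ == "__main__":
    seq = torch.tensor([[5, 3, 0, 0]])
    mask = (seq > 0).float()
    mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)
    print(mask)  # tensor([[1., 1., 1., 0.]])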
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = 1.0
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tl.full([1, 1], 4, tl.int64)
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tmp23.to(tl.float32)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp15, tmp18, tmp26)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = tmp31 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| VISLANG-Lab/MGCL | RewardCriterion | false | 1,171 | [
"MIT"
] | 0 | 22da06ffa7410d9632bfda8eefb1b79e4f660de0 | https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0 | import torch
import torch.nn as nn
from torch.autograd import *
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq > 0).float()
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1
).reshape(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return []
|
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3j/c3jk4fd45xsskb354bmqh5ayvalm334wxs72twddoal7gsrew3wi.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm => add, add_1, mul_1, rsqrt, var_mean
# out => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
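# The kernel above fuses GroupNorm and ReLU for one (batch, group) pair per
# program: it reduces the 16 elements of the group to a mean and variance,
# applies rsqrt(var + 1e-5) with the affine scale/shift, clamps at zero, and
# also stores the mean and rstd that the backward pass will need.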
# kernel path: runs/run_shard_6/inductor_cache/43/c43iah2ujzdzlzvirc5zcusvrhdz3liemhgusdpro5bcmzekdxpa.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
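# `triton_poi_fused_add_1` is the residual connection: it adds the block input
# (`primals_1`) onto the second convolution's output in place.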
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(primals_1, primals_2, primals_3, buf0, buf3, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.native_group_norm, aten.relu]
triton_per_fused_native_group_norm_relu_0.run(buf4, primals_5, primals_6, buf5, buf9, buf8, 16, 16, grid=grid(16), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf11, primals_1, 256, grid=grid(256), stream=stream0)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
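# A hedged smoke test (illustrative only, not part of the original repo): with
# stride 1 and no downsample, the residual block preserves the input shape.
if __name__ == "__main__":
    block = ResBlock(inplanes=4, planes=4)
    out = block(torch.rand(4, 4, 4, 4))
    print(out.shape)  # torch.Size([4, 4, 4, 4])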
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
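# Editor's reference sketch (not Inductor output): with a 4x4x4x4 input,
# norm(4) is GroupNorm(4, 4), so each group is a single (batch, channel)
# plane of 16 elements. The kernel above fuses that normalization with the
# trailing ReLU; in plain PyTorch the same computation is:
def group_norm_relu_reference(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    y = (x - mean) * torch.rsqrt(var + eps)
    return torch.relu(y * weight[None, :, None, None] + bias[None, :, None, None])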
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](primals_1,
primals_2, primals_3, buf0, buf3, buf12, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf4, primals_5,
primals_6, buf5, buf9, buf8, 16, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_6
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_add_1[grid(256)](buf11, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4,
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(
buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1),
(4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlockNew, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, input_0):
primals_2 = self.norm1.weight
primals_3 = self.norm1.bias
primals_4 = self.conv1.weight
primals_5 = self.norm2.weight
primals_6 = self.norm2.bias
primals_7 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
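# Editor's usage sketch (CUDA-only, since call() pins every buffer and kernel
# launch to cuda:0):
if __name__ == "__main__" and torch.cuda.is_available():
    m = ResBlockNew(inplanes=4, planes=4).cuda()
    y = m(torch.rand(4, 4, 4, 4, device="cuda"))
    assert y.shape == (4, 4, 4, 4)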
| TylerChoi1224/torchdiffeq | ResBlock | false | 1,172 | [
"MIT"
] | 0 | 72f74d9651a58ab11cdadd60682f1b61e625ef53 | https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53 | import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class Model(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
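# Editor's reference sketch (not Inductor output): together the two kernels
# above implement a numerically stable softmax over the last dimension --
# triton_poi_fused__softmax_1 computes exp(x - rowmax) and
# triton_poi_fused__softmax_2 divides by the row sum. In plain PyTorch:
def softmax_reference(x):
    e = (x - x.max(dim=-1, keepdim=True).values).exp()
    return e / e.sum(dim=-1, keepdim=True)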
# kernel path: runs/run_shard_6/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5z/c5zy7julai2lhuinuwjgyl62nx7cyws6ni5poe5jzp7qn532rcgh.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf10, primals_1, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
return (buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MultiheadAttention(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super(MultiheadAttention, self).__init__()
assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. Got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, x, key=None, value=None, residual=None, query_pos=
None, key_pos=None, attn_mask=None, key_padding_mask=None):
"""Forward function for `MultiheadAttention`.
Args:
x (Tensor): The input query with shape [num_query, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
key (Tensor): The key tensor with shape [num_key, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
Default None. If None, the `query` will be used.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Default None.
If None, the `key` will be used.
residual (Tensor): The tensor used for addition, with the
same shape as `x`. Default None. If None, `x` will be used.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. Default None. If not None, it will
be added to `x` before forward function.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Default None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`.
attn_mask (Tensor): ByteTensor mask with shape [num_query,
num_key]. Same in `nn.MultiheadAttention.forward`.
Default None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
Same in `nn.MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
query = x
if key is None:
key = query
if value is None:
value = key
if residual is None:
residual = x
if key_pos is None:
if query_pos is not None and key is not None:
if query_pos.shape == key.shape:
key_pos = query_pos
if query_pos is not None:
query = query + query_pos
if key_pos is not None:
key = key + key_pos
out = self.attn(query, key, value=value, attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
return residual + self.dropout(out)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dims': 4, 'num_heads': 4}]
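# Editor's usage sketch (not part of the original file): with the unbatched
# (seq_len, embed_dims) query from get_inputs(), recent PyTorch treats the
# 2-D input as a single unbatched sequence, and the residual add keeps its
# shape.
if __name__ == "__main__":
    attn = MultiheadAttention(embed_dims=4, num_heads=4)
    out = attn(torch.rand(4, 4))
    assert out.shape == (4, 4)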
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
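# Editor's note (not generated output): call() inlines nn.MultiheadAttention
# for embed_dims=4, num_heads=4 (head_dim=1, so the 1/sqrt(head_dim) factor in
# triton_poi_fused_mul_0 is exactly 1.0). The mm/addmm calls slice the (12, 4)
# in_proj_weight into Q/K/V projections at row offsets 0/4/8, the two bmm
# calls compute Q @ K^T and attn @ V around the softmax pair, and
# triton_poi_fused_add_4 fuses the output-projection bias with the residual
# add against primals_1.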
class MultiheadAttentionNew(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super(MultiheadAttentionNew, self).__init__()
assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. Got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
def forward(self, input_0):
primals_2 = self.attn.in_proj_weight
primals_3 = self.attn.in_proj_bias
primals_1 = self.attn.out_proj.weight
primals_5 = self.attn.out_proj.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| VIRC-lab-csust/AGMNet | MultiheadAttention | false | 1,173 | [
"Apache-2.0"
] | 0 | ead95466da343cf9436774138c642d2ca12da4e4 | https://github.com/VIRC-lab-csust/AGMNet/tree/ead95466da343cf9436774138c642d2ca12da4e4 | import torch
import torch.nn as nn
class Model(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super().__init__()
assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. Got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, x, key=None, value=None, residual=None, query_pos=
None, key_pos=None, attn_mask=None, key_padding_mask=None):
"""Forward function for `MultiheadAttention`.
Args:
x (Tensor): The input query with shape [num_query, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
key (Tensor): The key tensor with shape [num_key, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
Default None. If None, the `query` will be used.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Default None.
If None, the `key` will be used.
residual (Tensor): The tensor used for addition, with the
same shape as `x`. Default None. If None, `x` will be used.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. Default None. If not None, it will
be added to `x` before forward function.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Default None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`.
attn_mask (Tensor): ByteTensor mask with shape [num_query,
num_key]. Same in `nn.MultiheadAttention.forward`.
Default None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
Same in `nn.MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
query = x
if key is None:
key = query
if value is None:
value = key
if residual is None:
residual = x
if key_pos is None:
if query_pos is not None and key is not None:
if query_pos.shape == key.shape:
key_pos = query_pos
if query_pos is not None:
query = query + query_pos
if key_pos is not None:
key = key + key_pos
out = self.attn(query, key, value=value, attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
return residual + self.dropout(out)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
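# Editor's note (not generated output): this kernel materializes
# torch.cat((x, a), dim=-1) for two (4, 4) inputs -- lanes with x0 < 4 read
# from in_ptr0, and the remaining lanes read from in_ptr1 at column x0 - 4.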
# kernel path: runs/run_shard_6/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
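# Editor's note (not generated output): the three pointwise kernels above
# cover the non-matmul pieces of the eager forward -- torch.cat((x, a),
# dim=-1), the fused bias + ReLU after self.fc, and the fused bias + sigmoid
# after self.rew_out; the matrix multiplies themselves stay in
# extern_kernels.mm/addmm inside call() below.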
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (32, 8), (8, 1))
assert_size_stride(primals_4, (32, ), (1, ))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 32), (32, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 128, grid=grid(128), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_7, (32, 1), (1, 32), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf5, primals_8, 4, grid=grid(4), stream=stream0)
del primals_8
return (reinterpret_tensor(buf3, (4, 2, 2), (4, 2, 1), 0), buf5, buf0, buf2, buf5, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, action_dim, conv=False, conv_size=16,
fc_size=32, K=2):
super(Model, self).__init__()
self.input_size = input_size
self.input_h = int(np.sqrt(input_size))
self.action_dim = action_dim
self.K = K
self.conv = conv
self.conv_size = conv_size
self.fc_size = fc_size
if self.conv:
self.conv1 = nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.fc = nn.Linear(2 * 2 * self.conv_size + self.action_dim,
self.fc_size)
else:
self.fc = nn.Linear(self.input_size + self.action_dim, self.fc_size
)
self.rew_out = nn.Linear(self.fc_size, 1)
self.pred_out = nn.Linear(self.fc_size, self.input_size)
def forward(self, x, a):
if self.conv:
out = x.unsqueeze(1)
out = F.relu(self.conv1(out))
out = F.relu(self.conv2(out))
out = F.relu(self.conv3(out))
out = F.relu(self.conv4(out))
out = out.view(out.size(0), -1)
out = torch.cat((out, a), dim=-1)
else:
out = torch.cat((x, a), dim=-1)
out = F.relu(self.fc(out))
return self.pred_out(out).reshape(out.size(0), self.input_h, self.
input_h), torch.sigmoid(self.rew_out(out))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'action_dim': 4}]
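# Editor's usage sketch (not part of the original file): with input_size=4,
# input_h = sqrt(4) = 2, so the prediction head is reshaped to (batch, 2, 2).
if __name__ == "__main__":
    model = Model(input_size=4, action_dim=4)
    pred, rew = model(torch.rand(4, 4), torch.rand(4, 4))
    assert pred.shape == (4, 2, 2) and rew.shape == (4, 1)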
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (32, 8), (8, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 32), (32, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(128)](buf2, primals_4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(32, 4), (1, 32), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_7, (32, 1), (1,
32), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_8, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_8
return reinterpret_tensor(buf3, (4, 2, 2), (4, 2, 1), 0
), buf5, buf0, buf2, buf5, primals_7, primals_5
class ModelNew(nn.Module):
def __init__(self, input_size, action_dim, conv=False, conv_size=16,
fc_size=32, K=2):
super(ModelNew, self).__init__()
self.input_size = input_size
self.input_h = int(np.sqrt(input_size))
self.action_dim = action_dim
self.K = K
self.conv = conv
self.conv_size = conv_size
self.fc_size = fc_size
if self.conv:
self.conv1 = nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.fc = nn.Linear(2 * 2 * self.conv_size + self.action_dim,
self.fc_size)
else:
self.fc = nn.Linear(self.input_size + self.action_dim, self.fc_size
)
self.rew_out = nn.Linear(self.fc_size, 1)
self.pred_out = nn.Linear(self.fc_size, self.input_size)
def forward(self, input_0, input_1):
primals_3 = self.fc.weight
primals_4 = self.fc.bias
primals_7 = self.rew_out.weight
primals_8 = self.rew_out.bias
primals_5 = self.pred_out.weight
primals_6 = self.pred_out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
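# Editor's usage sketch (CUDA-only, since call() allocates every buffer on
# cuda:0):
if __name__ == "__main__" and torch.cuda.is_available():
    m = ModelNew(input_size=4, action_dim=4).cuda()
    pred, rew = m(torch.rand(4, 4, device="cuda"), torch.rand(4, 4, device="cuda"))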
| VashishtMadhavan/pytorch-maml-rl | Model | false | 1,174 | [
"MIT"
] | 0 | d8821b8374d973869bb6a1393f1b2c369c9a664b | https://github.com/VashishtMadhavan/pytorch-maml-rl/tree/d8821b8374d973869bb6a1393f1b2c369c9a664b | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, action_dim, conv=False, conv_size=16,
fc_size=32, K=2):
super(Model, self).__init__()
self.input_size = input_size
self.input_h = int(np.sqrt(input_size))
self.action_dim = action_dim
self.K = K
self.conv = conv
self.conv_size = conv_size
self.fc_size = fc_size
if self.conv:
self.conv1 = nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(self.conv_size, self.conv_size,
kernel_size=3, stride=1)
self.fc = nn.Linear(2 * 2 * self.conv_size + self.action_dim,
self.fc_size)
else:
self.fc = nn.Linear(self.input_size + self.action_dim, self.fc_size
)
self.rew_out = nn.Linear(self.fc_size, 1)
self.pred_out = nn.Linear(self.fc_size, self.input_size)
def forward(self, x, a):
if self.conv:
out = x.unsqueeze(1)
out = F.relu(self.conv1(out))
out = F.relu(self.conv2(out))
out = F.relu(self.conv3(out))
out = F.relu(self.conv4(out))
out = out.view(out.size(0), -1)
out = torch.cat((out, a), dim=-1)
else:
out = torch.cat((x, a), dim=-1)
out = F.relu(self.fc(out))
        return self.pred_out(out).reshape(out.size(0), self.input_h,
            self.input_h), torch.sigmoid(self.rew_out(out))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
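# Editorial usage sketch (hypothetical helper, not repository code): wire the
# init/input helpers above into the eager Model and inspect the two heads.
def _reference_run_sketch():
    model = Model(*get_init_inputs())     # Model(4, 4): fc path, conv=False
    pred, rew = model(*get_inputs())
    # input_size=4 gives input_h=2, so pred is (4, 2, 2); rew is (4, 1) in (0, 1).
    return pred, rew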
|
SoftClDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2z/c2z52nbkqzfj3pb424z5u5ipfv34l5rrdunfhla6b4x2j5hkbbab.py
# Topologically Sorted Source Nodes: [neg, neg_2, neg_4, neg_6, neg_8, neg_10], Original ATen: [aten.neg]
# Source node to ATen node mapping:
# neg => neg
# neg_10 => neg_10
# neg_2 => neg_2
# neg_4 => neg_4
# neg_6 => neg_6
# neg_8 => neg_8
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %neg_6 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %neg_8 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %neg_10 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_neg_0 = async_compile.triton('triton_poi_fused_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp1, xmask)
tl.store(out_ptr2 + (x0), tmp1, xmask)
tl.store(out_ptr3 + (x0), tmp1, xmask)
tl.store(out_ptr4 + (x0), tmp1, xmask)
tl.store(out_ptr5 + (x0), tmp1, xmask)
''', device_str='cuda')
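# Editorial note (hedged): the six aten.neg nodes fused above all read the
# same arg0_1, so the kernel negates once and stores the result to six output
# buffers instead of launching six separate neg kernels. A hypothetical eager
# equivalent, never invoked by the generated code:
def _fused_neg_fanout_sketch(x):
    return [torch.neg(x) for _ in range(6)]  # six independent -x buffers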
# kernel path: runs/run_shard_6/inductor_cache/ip/cipxs3ylzoary4hxycufopeumjuvo3z7qohbyxj2bt6bqzryv6uq.py
# Topologically Sorted Source Nodes: [p1, p3, min_1, p2, min_2], Original ATen: [aten.neg, aten.minimum]
# Source node to ATen node mapping:
# min_1 => minimum
# min_2 => minimum_1
# p1 => neg_1
# p2 => neg_3
# p3 => neg_5
# Graph fragment:
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem,), kwargs = {})
# %neg_5 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_4,), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1, %neg_5), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_2,), kwargs = {})
# %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum, %neg_3), kwargs = {})
triton_poi_fused_minimum_neg_1 = async_compile.triton('triton_poi_fused_minimum_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_minimum_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_minimum_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_out_ptr0 + (x0), xmask)
tmp5 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
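# Editorial sketch (an assumption based on the standard clDice soft_erode; the
# exact max_pool3d windows live elsewhere in this file): the getitem_* inputs
# are per-axis max-pools of -img, and this kernel takes their triple minimum,
# i.e. a separable soft erosion. Hypothetical, never invoked here:
def _soft_erode_sketch(img):
    import torch.nn.functional as F
    p1 = F.max_pool3d(-img, (3, 1, 1), (1, 1, 1), (1, 0, 0))
    p2 = F.max_pool3d(-img, (1, 3, 1), (1, 1, 1), (0, 1, 0))
    p3 = F.max_pool3d(-img, (1, 1, 3), (1, 1, 1), (0, 0, 1))
    return torch.min(torch.min(-p1, -p3), -p2)  # matches tmp7 above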
# kernel path: runs/run_shard_6/inductor_cache/mb/cmb6p6rbnihurmykwbijcopqwoqifvljhaqdogzdkvjx2qxcpe3d.py
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_12, neg_14, neg_16, neg_18, neg_20, neg_22], Original ATen: [aten.neg, aten.minimum]
# Source node to ATen node mapping:
# img => minimum_3
# min_3 => minimum_2
# neg_12 => neg_12
# neg_14 => neg_14
# neg_16 => neg_16
# neg_18 => neg_18
# neg_20 => neg_20
# neg_22 => neg_22
# p1_1 => neg_7
# p2_1 => neg_9
# p3_1 => neg_11
# Graph fragment:
# %neg_7 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_8,), kwargs = {})
# %neg_11 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_12,), kwargs = {})
# %minimum_2 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_7, %neg_11), kwargs = {})
# %neg_9 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_10,), kwargs = {})
# %minimum_3 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_2, %neg_9), kwargs = {})
# %neg_12 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
# %neg_14 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
# %neg_16 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
# %neg_18 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
# %neg_20 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
# %neg_22 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_3,), kwargs = {})
triton_poi_fused_minimum_neg_2 = async_compile.triton('triton_poi_fused_minimum_neg_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_minimum_neg_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_minimum_neg_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp8, xmask)
tl.store(out_ptr2 + (x0), tmp8, xmask)
tl.store(out_ptr3 + (x0), tmp8, xmask)
tl.store(out_ptr4 + (x0), tmp8, xmask)
tl.store(out_ptr5 + (x0), tmp8, xmask)
''', device_str='cuda')
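# Editorial note (hedged): this kernel is the same triple-minimum erosion as
# triton_poi_fused_minimum_neg_1, except that it also re-negates the result
# (tmp8 = -tmp7) and fans it out to six buffers, pre-computing the -img
# operands for the next round of pooling. Hypothetical eager equivalent:
def _erode_and_renegate_sketch(p1, p2, p3):
    eroded = torch.min(torch.min(-p1, -p3), -p2)
    return [-eroded for _ in range(6)]  # six identical -eroded buffers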
# kernel path: runs/run_shard_6/inductor_cache/th/cthwrbbxdwwanqojjtt5chxxmkjglywngkngkac2r6vpnuzpgql4.py
# Topologically Sorted Source Nodes: [p1_2, p3_2, min_5, p2_2, min_6], Original ATen: [aten.neg, aten.minimum]
# Source node to ATen node mapping:
# min_5 => minimum_4
# min_6 => minimum_5
# p1_2 => neg_13
# p2_2 => neg_15
# p3_2 => neg_17
# Graph fragment:
# %neg_13 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_14,), kwargs = {})
# %neg_17 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_18,), kwargs = {})
# %minimum_4 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_13, %neg_17), kwargs = {})
# %neg_15 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_16,), kwargs = {})
# %minimum_5 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_4, %neg_15), kwargs = {})
triton_poi_fused_minimum_neg_3 = async_compile.triton('triton_poi_fused_minimum_neg_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_minimum_neg_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_minimum_neg_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr0 + (x0), xmask)
tmp5 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ww/cww6s7nwjtvg2ybwkuppshbo2yvgxddgy5yparllmleqccomaomq.py
# Topologically Sorted Source Nodes: [p1_99, p3_99, min_199, p2_99, img_49, neg_600, neg_602, neg_604], Original ATen: [aten.neg, aten.minimum]
# Source node to ATen node mapping:
# img_49 => minimum_199
# min_199 => minimum_198
# neg_600 => neg_600
# neg_602 => neg_602
# neg_604 => neg_604
# p1_99 => neg_595
# p2_99 => neg_597
# p3_99 => neg_599
# Graph fragment:
# %neg_595 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_694,), kwargs = {})
# %neg_599 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_698,), kwargs = {})
# %minimum_198 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_595, %neg_599), kwargs = {})
# %neg_597 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_696,), kwargs = {})
# %minimum_199 : [num_users=4] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_198, %neg_597), kwargs = {})
# %neg_600 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_199,), kwargs = {})
# %neg_602 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_199,), kwargs = {})
# %neg_604 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%minimum_199,), kwargs = {})
triton_poi_fused_minimum_neg_4 = async_compile.triton('triton_poi_fused_minimum_neg_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_minimum_neg_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_minimum_neg_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp8, xmask)
tl.store(out_ptr2 + (x0), tmp8, xmask)
''', device_str='cuda')
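# Editorial sketch (an assumption based on the standard clDice soft_skel loop)
# of the sub/relu/mul/add chain enumerated in the large comment below: per
# iteration, delta = relu(img - open(img)) and
# skel = skel + relu(delta - skel * delta). Hypothetical, never invoked here:
def _soft_skel_update_sketch(img, img_opened, skel):
    delta = torch.relu(img - img_opened)
    return skel + torch.relu(delta - skel * delta)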
# kernel path: runs/run_shard_6/inductor_cache/2j/c2j6qnrimplcljdjf3xksxfmllenyl573okraxpgsvaqf4wbzael.py
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, p1_104, p3_104, min_209, p2_104, img_51, p1_106, p3_106, min_213, p2_106, img_52, p1_108, p3_108, min_217, p2_108, img_53, p1_110, p3_110, min_221, p2_110, img_54, p1_112, p3_112, min_225, p2_112, img_55, p1_114, p3_114, min_229, p2_114, img_56, p1_116, p3_116, min_233, p2_116, img_57, p1_118, p3_118, min_237, p2_118, img_58, p1_120, p3_120, min_241, p2_120, img_59, p1_122, p3_122, min_245, p2_122, img_60, p1_124, p3_124, min_249, p2_124, img_61, p1_126, p3_126, min_253, p2_126, img_62, p1_128, p3_128, min_257, p2_128, img_63, p1_130, p3_130, min_261, p2_130, img_64, p1_132, p3_132, min_265, p2_132, img_65, p1_134, p3_134, min_269, p2_134, img_66, p1_136, p3_136, min_273, p2_136, img_67, p1_138, p3_138, min_277, p2_138, img_68, p1_140, p3_140, min_281, p2_140, img_69, p1_142, p3_142, min_285, p2_142, img_70, p1_144, p3_144, min_289, p2_144, img_71, p1_146, p3_146, min_293, p2_146, img_72, p1_148, p3_148, min_297, p2_148, img_73, p1_150, p3_150, min_301, p2_150, img_74, p1_152, p3_152, min_305, p2_152, img_75, p1_154, p3_154, min_309, p2_154, img_76, p1_156, p3_156, min_313, p2_156, img_77, p1_158, p3_158, min_317, p2_158, img_78, p1_160, p3_160, min_321, p2_160, img_79, p1_162, p3_162, min_325, p2_162, img_80, p1_164, p3_164, min_329, p2_164, img_81, p1_166, p3_166, min_333, p2_166, img_82, p1_168, p3_168, min_337, p2_168, img_83, p1_170, p3_170, min_341, p2_170, img_84, p1_172, p3_172, min_345, p2_172, img_85, p1_174, p3_174, min_349, p2_174, img_86, p1_176, p3_176, min_353, p2_176, img_87, p1_178, p3_178, min_357, p2_178, img_88, p1_180, p3_180, min_361, p2_180, img_89, p1_182, p3_182, min_365, p2_182, img_90, p1_184, p3_184, min_369, p2_184, img_91, p1_186, p3_186, min_373, p2_186, img_92, p1_188, p3_188, min_377, p2_188, img_93, p1_190, p3_190, min_381, p2_190, img_94, p1_192, p3_192, min_385, p2_192, img_95, p1_194, p3_194, min_389, p2_194, img_96, p1_196, p3_196, min_393, p2_196, img_97, p1_198, p3_198, min_397, p2_198, img_98, p1_200, p3_200, min_401, p2_200, img_99, sub_101, skel_51, sub_102, delta_50, mul_50, sub_103, relu_103, skel_52, sub_104, delta_51, mul_51, sub_105, relu_105, skel_53, sub_106, delta_52, mul_52, sub_107, relu_107, skel_54, sub_108, delta_53, mul_53, sub_109, relu_109, skel_55, sub_110, delta_54, mul_54, sub_111, relu_111, skel_56, sub_112, delta_55, mul_55, sub_113, relu_113, skel_57, sub_114, delta_56, mul_56, sub_115, relu_115, skel_58, sub_116, delta_57, mul_57, sub_117, relu_117, skel_59, sub_118, delta_58, mul_58, sub_119, relu_119, skel_60, sub_120, delta_59, mul_59, sub_121, relu_121, skel_61, sub_122, delta_60, mul_60, sub_123, relu_123, skel_62, sub_124, delta_61, mul_61, sub_125, relu_125, skel_63, sub_126, delta_62, mul_62, sub_127, relu_127, skel_64, sub_128, delta_63, mul_63, sub_129, relu_129, skel_65, sub_130, delta_64, mul_64, sub_131, relu_131, skel_66, sub_132, delta_65, mul_65, sub_133, relu_133, skel_67, sub_134, delta_66, mul_66, sub_135, relu_135, skel_68, sub_136, delta_67, mul_67, sub_137, relu_137, skel_69, sub_138, delta_68, mul_68, sub_139, relu_139, skel_70, sub_140, delta_69, mul_69, sub_141, relu_141, skel_71, sub_142, delta_70, mul_70, sub_143, relu_143, skel_72, sub_144, delta_71, mul_71, sub_145, relu_145, skel_73, sub_146, delta_72, mul_72, sub_147, relu_147, skel_74, sub_148, delta_73, mul_73, sub_149, relu_149, skel_75, sub_150, delta_74, mul_74, sub_151, relu_151, skel_76, sub_152, delta_75, mul_75, sub_153, relu_153, skel_77, sub_154, delta_76, 
# mul_76, sub_155, relu_155, skel_78, sub_156, delta_77, mul_77, sub_157, relu_157, skel_79, sub_158, delta_78, mul_78, sub_159, relu_159, skel_80, sub_160, delta_79, mul_79, sub_161, relu_161, skel_81, sub_162, delta_80, mul_80, sub_163, relu_163, skel_82, sub_164, delta_81, mul_81, sub_165, relu_165, skel_83, sub_166, delta_82, mul_82, sub_167, relu_167, skel_84, sub_168, delta_83, mul_83, sub_169, relu_169, skel_85, sub_170, delta_84, mul_84, sub_171, relu_171, skel_86, sub_172, delta_85, mul_85, sub_173, relu_173, skel_87, sub_174, delta_86, mul_86, sub_175, relu_175, skel_88, sub_176, delta_87, mul_87, sub_177, relu_177, skel_89, sub_178, delta_88, mul_88, sub_179, relu_179, skel_90, sub_180, delta_89, mul_89, sub_181, relu_181, skel_91, sub_182, delta_90, mul_90, sub_183, relu_183, skel_92, sub_184, delta_91, mul_91, sub_185, relu_185, skel_93, sub_186, delta_92, mul_92, sub_187, relu_187, skel_94, sub_188, delta_93, mul_93, sub_189, relu_189, skel_95, sub_190, delta_94, mul_94, sub_191, relu_191, skel_96, sub_192, delta_95, mul_95, sub_193, relu_193, skel_97, sub_194, delta_96, mul_96, sub_195, relu_195, skel_98, sub_196, delta_97, mul_97, sub_197, relu_197, skel_99, sub_198, delta_98, mul_98, sub_199, relu_199, skel_100, sub_200, delta_99, mul_99, sub_201, relu_201, skel_101], Original ATen: [aten.neg, aten.minimum, aten.sub, aten.relu, aten.mul, aten.add]
# Source node to ATen node mapping:
# delta_50 => relu_102
# delta_51 => relu_104
# delta_52 => relu_106
# delta_53 => relu_108
# delta_54 => relu_110
# delta_55 => relu_112
# delta_56 => relu_114
# delta_57 => relu_116
# delta_58 => relu_118
# delta_59 => relu_120
# delta_60 => relu_122
# delta_61 => relu_124
# delta_62 => relu_126
# delta_63 => relu_128
# delta_64 => relu_130
# delta_65 => relu_132
# delta_66 => relu_134
# delta_67 => relu_136
# delta_68 => relu_138
# delta_69 => relu_140
# delta_70 => relu_142
# delta_71 => relu_144
# delta_72 => relu_146
# delta_73 => relu_148
# delta_74 => relu_150
# delta_75 => relu_152
# delta_76 => relu_154
# delta_77 => relu_156
# delta_78 => relu_158
# delta_79 => relu_160
# delta_80 => relu_162
# delta_81 => relu_164
# delta_82 => relu_166
# delta_83 => relu_168
# delta_84 => relu_170
# delta_85 => relu_172
# delta_86 => relu_174
# delta_87 => relu_176
# delta_88 => relu_178
# delta_89 => relu_180
# delta_90 => relu_182
# delta_91 => relu_184
# delta_92 => relu_186
# delta_93 => relu_188
# delta_94 => relu_190
# delta_95 => relu_192
# delta_96 => relu_194
# delta_97 => relu_196
# delta_98 => relu_198
# delta_99 => relu_200
# img_50 => minimum_205
# img_51 => minimum_209
# img_52 => minimum_213
# img_53 => minimum_217
# img_54 => minimum_221
# img_55 => minimum_225
# img_56 => minimum_229
# img_57 => minimum_233
# img_58 => minimum_237
# img_59 => minimum_241
# img_60 => minimum_245
# img_61 => minimum_249
# img_62 => minimum_253
# img_63 => minimum_257
# img_64 => minimum_261
# img_65 => minimum_265
# img_66 => minimum_269
# img_67 => minimum_273
# img_68 => minimum_277
# img_69 => minimum_281
# img_70 => minimum_285
# img_71 => minimum_289
# img_72 => minimum_293
# img_73 => minimum_297
# img_74 => minimum_301
# img_75 => minimum_305
# img_76 => minimum_309
# img_77 => minimum_313
# img_78 => minimum_317
# img_79 => minimum_321
# img_80 => minimum_325
# img_81 => minimum_329
# img_82 => minimum_333
# img_83 => minimum_337
# img_84 => minimum_341
# img_85 => minimum_345
# img_86 => minimum_349
# img_87 => minimum_353
# img_88 => minimum_357
# img_89 => minimum_361
# img_90 => minimum_365
# img_91 => minimum_369
# img_92 => minimum_373
# img_93 => minimum_377
# img_94 => minimum_381
# img_95 => minimum_385
# img_96 => minimum_389
# img_97 => minimum_393
# img_98 => minimum_397
# img_99 => minimum_401
# min_205 => minimum_204
# min_209 => minimum_208
# min_213 => minimum_212
# min_217 => minimum_216
# min_221 => minimum_220
# min_225 => minimum_224
# min_229 => minimum_228
# min_233 => minimum_232
# min_237 => minimum_236
# min_241 => minimum_240
# min_245 => minimum_244
# min_249 => minimum_248
# min_253 => minimum_252
# min_257 => minimum_256
# min_261 => minimum_260
# min_265 => minimum_264
# min_269 => minimum_268
# min_273 => minimum_272
# min_277 => minimum_276
# min_281 => minimum_280
# min_285 => minimum_284
# min_289 => minimum_288
# min_293 => minimum_292
# min_297 => minimum_296
# min_301 => minimum_300
# min_305 => minimum_304
# min_309 => minimum_308
# min_313 => minimum_312
# min_317 => minimum_316
# min_321 => minimum_320
# min_325 => minimum_324
# min_329 => minimum_328
# min_333 => minimum_332
# min_337 => minimum_336
# min_341 => minimum_340
# min_345 => minimum_344
# min_349 => minimum_348
# min_353 => minimum_352
# min_357 => minimum_356
# min_361 => minimum_360
# min_365 => minimum_364
# min_369 => minimum_368
# min_373 => minimum_372
# min_377 => minimum_376
# min_381 => minimum_380
# min_385 => minimum_384
# min_389 => minimum_388
# min_393 => minimum_392
# min_397 => minimum_396
# min_401 => minimum_400
# mul_50 => mul_50
# mul_51 => mul_51
# mul_52 => mul_52
# mul_53 => mul_53
# mul_54 => mul_54
# mul_55 => mul_55
# mul_56 => mul_56
# mul_57 => mul_57
# mul_58 => mul_58
# mul_59 => mul_59
# mul_60 => mul_60
# mul_61 => mul_61
# mul_62 => mul_62
# mul_63 => mul_63
# mul_64 => mul_64
# mul_65 => mul_65
# mul_66 => mul_66
# mul_67 => mul_67
# mul_68 => mul_68
# mul_69 => mul_69
# mul_70 => mul_70
# mul_71 => mul_71
# mul_72 => mul_72
# mul_73 => mul_73
# mul_74 => mul_74
# mul_75 => mul_75
# mul_76 => mul_76
# mul_77 => mul_77
# mul_78 => mul_78
# mul_79 => mul_79
# mul_80 => mul_80
# mul_81 => mul_81
# mul_82 => mul_82
# mul_83 => mul_83
# mul_84 => mul_84
# mul_85 => mul_85
# mul_86 => mul_86
# mul_87 => mul_87
# mul_88 => mul_88
# mul_89 => mul_89
# mul_90 => mul_90
# mul_91 => mul_91
# mul_92 => mul_92
# mul_93 => mul_93
# mul_94 => mul_94
# mul_95 => mul_95
# mul_96 => mul_96
# mul_97 => mul_97
# mul_98 => mul_98
# mul_99 => mul_99
# p1_102 => neg_613
# p1_104 => neg_625
# p1_106 => neg_637
# p1_108 => neg_649
# p1_110 => neg_661
# p1_112 => neg_673
# p1_114 => neg_685
# p1_116 => neg_697
# p1_118 => neg_709
# p1_120 => neg_721
# p1_122 => neg_733
# p1_124 => neg_745
# p1_126 => neg_757
# p1_128 => neg_769
# p1_130 => neg_781
# p1_132 => neg_793
# p1_134 => neg_805
# p1_136 => neg_817
# p1_138 => neg_829
# p1_140 => neg_841
# p1_142 => neg_853
# p1_144 => neg_865
# p1_146 => neg_877
# p1_148 => neg_889
# p1_150 => neg_901
# p1_152 => neg_913
# p1_154 => neg_925
# p1_156 => neg_937
# p1_158 => neg_949
# p1_160 => neg_961
# p1_162 => neg_973
# p1_164 => neg_985
# p1_166 => neg_997
# p1_168 => neg_1009
# p1_170 => neg_1021
# p1_172 => neg_1033
# p1_174 => neg_1045
# p1_176 => neg_1057
# p1_178 => neg_1069
# p1_180 => neg_1081
# p1_182 => neg_1093
# p1_184 => neg_1105
# p1_186 => neg_1117
# p1_188 => neg_1129
# p1_190 => neg_1141
# p1_192 => neg_1153
# p1_194 => neg_1165
# p1_196 => neg_1177
# p1_198 => neg_1189
# p1_200 => neg_1201
# p2_102 => neg_615
# p2_104 => neg_627
# p2_106 => neg_639
# p2_108 => neg_651
# p2_110 => neg_663
# p2_112 => neg_675
# p2_114 => neg_687
# p2_116 => neg_699
# p2_118 => neg_711
# p2_120 => neg_723
# p2_122 => neg_735
# p2_124 => neg_747
# p2_126 => neg_759
# p2_128 => neg_771
# p2_130 => neg_783
# p2_132 => neg_795
# p2_134 => neg_807
# p2_136 => neg_819
# p2_138 => neg_831
# p2_140 => neg_843
# p2_142 => neg_855
# p2_144 => neg_867
# p2_146 => neg_879
# p2_148 => neg_891
# p2_150 => neg_903
# p2_152 => neg_915
# p2_154 => neg_927
# p2_156 => neg_939
# p2_158 => neg_951
# p2_160 => neg_963
# p2_162 => neg_975
# p2_164 => neg_987
# p2_166 => neg_999
# p2_168 => neg_1011
# p2_170 => neg_1023
# p2_172 => neg_1035
# p2_174 => neg_1047
# p2_176 => neg_1059
# p2_178 => neg_1071
# p2_180 => neg_1083
# p2_182 => neg_1095
# p2_184 => neg_1107
# p2_186 => neg_1119
# p2_188 => neg_1131
# p2_190 => neg_1143
# p2_192 => neg_1155
# p2_194 => neg_1167
# p2_196 => neg_1179
# p2_198 => neg_1191
# p2_200 => neg_1203
# p3_102 => neg_617
# p3_104 => neg_629
# p3_106 => neg_641
# p3_108 => neg_653
# p3_110 => neg_665
# p3_112 => neg_677
# p3_114 => neg_689
# p3_116 => neg_701
# p3_118 => neg_713
# p3_120 => neg_725
# p3_122 => neg_737
# p3_124 => neg_749
# p3_126 => neg_761
# p3_128 => neg_773
# p3_130 => neg_785
# p3_132 => neg_797
# p3_134 => neg_809
# p3_136 => neg_821
# p3_138 => neg_833
# p3_140 => neg_845
# p3_142 => neg_857
# p3_144 => neg_869
# p3_146 => neg_881
# p3_148 => neg_893
# p3_150 => neg_905
# p3_152 => neg_917
# p3_154 => neg_929
# p3_156 => neg_941
# p3_158 => neg_953
# p3_160 => neg_965
# p3_162 => neg_977
# p3_164 => neg_989
# p3_166 => neg_1001
# p3_168 => neg_1013
# p3_170 => neg_1025
# p3_172 => neg_1037
# p3_174 => neg_1049
# p3_176 => neg_1061
# p3_178 => neg_1073
# p3_180 => neg_1085
# p3_182 => neg_1097
# p3_184 => neg_1109
# p3_186 => neg_1121
# p3_188 => neg_1133
# p3_190 => neg_1145
# p3_192 => neg_1157
# p3_194 => neg_1169
# p3_196 => neg_1181
# p3_198 => neg_1193
# p3_200 => neg_1205
# relu_103 => relu_103
# relu_105 => relu_105
# relu_107 => relu_107
# relu_109 => relu_109
# relu_111 => relu_111
# relu_113 => relu_113
# relu_115 => relu_115
# relu_117 => relu_117
# relu_119 => relu_119
# relu_121 => relu_121
# relu_123 => relu_123
# relu_125 => relu_125
# relu_127 => relu_127
# relu_129 => relu_129
# relu_131 => relu_131
# relu_133 => relu_133
# relu_135 => relu_135
# relu_137 => relu_137
# relu_139 => relu_139
# relu_141 => relu_141
# relu_143 => relu_143
# relu_145 => relu_145
# relu_147 => relu_147
# relu_149 => relu_149
# relu_151 => relu_151
# relu_153 => relu_153
# relu_155 => relu_155
# relu_157 => relu_157
# relu_159 => relu_159
# relu_161 => relu_161
# relu_163 => relu_163
# relu_165 => relu_165
# relu_167 => relu_167
# relu_169 => relu_169
# relu_171 => relu_171
# relu_173 => relu_173
# relu_175 => relu_175
# relu_177 => relu_177
# relu_179 => relu_179
# relu_181 => relu_181
# relu_183 => relu_183
# relu_185 => relu_185
# relu_187 => relu_187
# relu_189 => relu_189
# relu_191 => relu_191
# relu_193 => relu_193
# relu_195 => relu_195
# relu_197 => relu_197
# relu_199 => relu_199
# relu_201 => relu_201
# skel_100 => add_98
# skel_101 => add_99
# skel_51 => relu_101
# skel_52 => add_50
# skel_53 => add_51
# skel_54 => add_52
# skel_55 => add_53
# skel_56 => add_54
# skel_57 => add_55
# skel_58 => add_56
# skel_59 => add_57
# skel_60 => add_58
# skel_61 => add_59
# skel_62 => add_60
# skel_63 => add_61
# skel_64 => add_62
# skel_65 => add_63
# skel_66 => add_64
# skel_67 => add_65
# skel_68 => add_66
# skel_69 => add_67
# skel_70 => add_68
# skel_71 => add_69
# skel_72 => add_70
# skel_73 => add_71
# skel_74 => add_72
# skel_75 => add_73
# skel_76 => add_74
# skel_77 => add_75
# skel_78 => add_76
# skel_79 => add_77
# skel_80 => add_78
# skel_81 => add_79
# skel_82 => add_80
# skel_83 => add_81
# skel_84 => add_82
# skel_85 => add_83
# skel_86 => add_84
# skel_87 => add_85
# skel_88 => add_86
# skel_89 => add_87
# skel_90 => add_88
# skel_91 => add_89
# skel_92 => add_90
# skel_93 => add_91
# skel_94 => add_92
# skel_95 => add_93
# skel_96 => add_94
# skel_97 => add_95
# skel_98 => add_96
# skel_99 => add_97
# sub_101 => sub_101
# sub_102 => sub_102
# sub_103 => sub_103
# sub_104 => sub_104
# sub_105 => sub_105
# sub_106 => sub_106
# sub_107 => sub_107
# sub_108 => sub_108
# sub_109 => sub_109
# sub_110 => sub_110
# sub_111 => sub_111
# sub_112 => sub_112
# sub_113 => sub_113
# sub_114 => sub_114
# sub_115 => sub_115
# sub_116 => sub_116
# sub_117 => sub_117
# sub_118 => sub_118
# sub_119 => sub_119
# sub_120 => sub_120
# sub_121 => sub_121
# sub_122 => sub_122
# sub_123 => sub_123
# sub_124 => sub_124
# sub_125 => sub_125
# sub_126 => sub_126
# sub_127 => sub_127
# sub_128 => sub_128
# sub_129 => sub_129
# sub_130 => sub_130
# sub_131 => sub_131
# sub_132 => sub_132
# sub_133 => sub_133
# sub_134 => sub_134
# sub_135 => sub_135
# sub_136 => sub_136
# sub_137 => sub_137
# sub_138 => sub_138
# sub_139 => sub_139
# sub_140 => sub_140
# sub_141 => sub_141
# sub_142 => sub_142
# sub_143 => sub_143
# sub_144 => sub_144
# sub_145 => sub_145
# sub_146 => sub_146
# sub_147 => sub_147
# sub_148 => sub_148
# sub_149 => sub_149
# sub_150 => sub_150
# sub_151 => sub_151
# sub_152 => sub_152
# sub_153 => sub_153
# sub_154 => sub_154
# sub_155 => sub_155
# sub_156 => sub_156
# sub_157 => sub_157
# sub_158 => sub_158
# sub_159 => sub_159
# sub_160 => sub_160
# sub_161 => sub_161
# sub_162 => sub_162
# sub_163 => sub_163
# sub_164 => sub_164
# sub_165 => sub_165
# sub_166 => sub_166
# sub_167 => sub_167
# sub_168 => sub_168
# sub_169 => sub_169
# sub_170 => sub_170
# sub_171 => sub_171
# sub_172 => sub_172
# sub_173 => sub_173
# sub_174 => sub_174
# sub_175 => sub_175
# sub_176 => sub_176
# sub_177 => sub_177
# sub_178 => sub_178
# sub_179 => sub_179
# sub_180 => sub_180
# sub_181 => sub_181
# sub_182 => sub_182
# sub_183 => sub_183
# sub_184 => sub_184
# sub_185 => sub_185
# sub_186 => sub_186
# sub_187 => sub_187
# sub_188 => sub_188
# sub_189 => sub_189
# sub_190 => sub_190
# sub_191 => sub_191
# sub_192 => sub_192
# sub_193 => sub_193
# sub_194 => sub_194
# sub_195 => sub_195
# sub_196 => sub_196
# sub_197 => sub_197
# sub_198 => sub_198
# sub_199 => sub_199
# sub_200 => sub_200
# sub_201 => sub_201
# Graph fragment:
# %neg_613 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_716,), kwargs = {})
# %neg_617 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_720,), kwargs = {})
# %minimum_204 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_613, %neg_617), kwargs = {})
# %neg_615 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_718,), kwargs = {})
# %minimum_205 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_204, %neg_615), kwargs = {})
# %neg_625 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_730,), kwargs = {})
# %neg_629 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_734,), kwargs = {})
# %minimum_208 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_625, %neg_629), kwargs = {})
# %neg_627 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_732,), kwargs = {})
# %minimum_209 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_208, %neg_627), kwargs = {})
# %neg_637 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_744,), kwargs = {})
# %neg_641 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_748,), kwargs = {})
# %minimum_212 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_637, %neg_641), kwargs = {})
# %neg_639 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_746,), kwargs = {})
# %minimum_213 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_212, %neg_639), kwargs = {})
# %neg_649 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_758,), kwargs = {})
# %neg_653 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_762,), kwargs = {})
# %minimum_216 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_649, %neg_653), kwargs = {})
# %neg_651 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_760,), kwargs = {})
# %minimum_217 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_216, %neg_651), kwargs = {})
# %neg_661 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_772,), kwargs = {})
# %neg_665 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_776,), kwargs = {})
# %minimum_220 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_661, %neg_665), kwargs = {})
# %neg_663 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_774,), kwargs = {})
# %minimum_221 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_220, %neg_663), kwargs = {})
# %neg_673 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_786,), kwargs = {})
# %neg_677 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_790,), kwargs = {})
# %minimum_224 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_673, %neg_677), kwargs = {})
# %neg_675 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_788,), kwargs = {})
# %minimum_225 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_224, %neg_675), kwargs = {})
# %neg_685 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_800,), kwargs = {})
# %neg_689 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_804,), kwargs = {})
# %minimum_228 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_685, %neg_689), kwargs = {})
# %neg_687 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_802,), kwargs = {})
# %minimum_229 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_228, %neg_687), kwargs = {})
# %neg_697 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_814,), kwargs = {})
# %neg_701 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_818,), kwargs = {})
# %minimum_232 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_697, %neg_701), kwargs = {})
# %neg_699 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_816,), kwargs = {})
# %minimum_233 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_232, %neg_699), kwargs = {})
# %neg_709 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_828,), kwargs = {})
# %neg_713 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_832,), kwargs = {})
# %minimum_236 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_709, %neg_713), kwargs = {})
# %neg_711 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_830,), kwargs = {})
# %minimum_237 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_236, %neg_711), kwargs = {})
# %neg_721 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_842,), kwargs = {})
# %neg_725 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_846,), kwargs = {})
# %minimum_240 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_721, %neg_725), kwargs = {})
# %neg_723 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_844,), kwargs = {})
# %minimum_241 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_240, %neg_723), kwargs = {})
# %neg_733 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_856,), kwargs = {})
# %neg_737 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_860,), kwargs = {})
# %minimum_244 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_733, %neg_737), kwargs = {})
# %neg_735 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_858,), kwargs = {})
# %minimum_245 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_244, %neg_735), kwargs = {})
# %neg_745 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_870,), kwargs = {})
# %neg_749 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_874,), kwargs = {})
# %minimum_248 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_745, %neg_749), kwargs = {})
# %neg_747 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_872,), kwargs = {})
# %minimum_249 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_248, %neg_747), kwargs = {})
# %neg_757 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_884,), kwargs = {})
# %neg_761 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_888,), kwargs = {})
# %minimum_252 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_757, %neg_761), kwargs = {})
# %neg_759 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_886,), kwargs = {})
# %minimum_253 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_252, %neg_759), kwargs = {})
# %neg_769 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_898,), kwargs = {})
# %neg_773 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_902,), kwargs = {})
# %minimum_256 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_769, %neg_773), kwargs = {})
# %neg_771 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_900,), kwargs = {})
# %minimum_257 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_256, %neg_771), kwargs = {})
# %neg_781 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_912,), kwargs = {})
# %neg_785 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_916,), kwargs = {})
# %minimum_260 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_781, %neg_785), kwargs = {})
# %neg_783 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_914,), kwargs = {})
# %minimum_261 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_260, %neg_783), kwargs = {})
# %neg_793 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_926,), kwargs = {})
# %neg_797 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_930,), kwargs = {})
# %minimum_264 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_793, %neg_797), kwargs = {})
# %neg_795 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_928,), kwargs = {})
# %minimum_265 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_264, %neg_795), kwargs = {})
# %neg_805 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_940,), kwargs = {})
# %neg_809 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_944,), kwargs = {})
# %minimum_268 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_805, %neg_809), kwargs = {})
# %neg_807 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_942,), kwargs = {})
# %minimum_269 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_268, %neg_807), kwargs = {})
# %neg_817 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_954,), kwargs = {})
# %neg_821 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_958,), kwargs = {})
# %minimum_272 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_817, %neg_821), kwargs = {})
# %neg_819 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_956,), kwargs = {})
# %minimum_273 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_272, %neg_819), kwargs = {})
# %neg_829 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_968,), kwargs = {})
# %neg_833 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_972,), kwargs = {})
# %minimum_276 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_829, %neg_833), kwargs = {})
# %neg_831 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_970,), kwargs = {})
# %minimum_277 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_276, %neg_831), kwargs = {})
# %neg_841 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_982,), kwargs = {})
# %neg_845 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_986,), kwargs = {})
# %minimum_280 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_841, %neg_845), kwargs = {})
# %neg_843 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_984,), kwargs = {})
# %minimum_281 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_280, %neg_843), kwargs = {})
# %neg_853 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_996,), kwargs = {})
# %neg_857 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1000,), kwargs = {})
# %minimum_284 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_853, %neg_857), kwargs = {})
# %neg_855 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_998,), kwargs = {})
# %minimum_285 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_284, %neg_855), kwargs = {})
# %neg_865 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1010,), kwargs = {})
# %neg_869 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1014,), kwargs = {})
# %minimum_288 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_865, %neg_869), kwargs = {})
# %neg_867 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1012,), kwargs = {})
# %minimum_289 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_288, %neg_867), kwargs = {})
# %neg_877 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1024,), kwargs = {})
# %neg_881 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1028,), kwargs = {})
# %minimum_292 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_877, %neg_881), kwargs = {})
# %neg_879 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1026,), kwargs = {})
# %minimum_293 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_292, %neg_879), kwargs = {})
# %neg_889 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1038,), kwargs = {})
# %neg_893 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1042,), kwargs = {})
# %minimum_296 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_889, %neg_893), kwargs = {})
# %neg_891 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1040,), kwargs = {})
# %minimum_297 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_296, %neg_891), kwargs = {})
# %neg_901 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1052,), kwargs = {})
# %neg_905 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1056,), kwargs = {})
# %minimum_300 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_901, %neg_905), kwargs = {})
# %neg_903 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1054,), kwargs = {})
# %minimum_301 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_300, %neg_903), kwargs = {})
# %neg_913 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1066,), kwargs = {})
# %neg_917 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1070,), kwargs = {})
# %minimum_304 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_913, %neg_917), kwargs = {})
# %neg_915 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1068,), kwargs = {})
# %minimum_305 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_304, %neg_915), kwargs = {})
# %neg_925 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1080,), kwargs = {})
# %neg_929 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1084,), kwargs = {})
# %minimum_308 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_925, %neg_929), kwargs = {})
# %neg_927 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1082,), kwargs = {})
# %minimum_309 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_308, %neg_927), kwargs = {})
# %neg_937 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1094,), kwargs = {})
# %neg_941 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1098,), kwargs = {})
# %minimum_312 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_937, %neg_941), kwargs = {})
# %neg_939 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1096,), kwargs = {})
# %minimum_313 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_312, %neg_939), kwargs = {})
# %neg_949 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1108,), kwargs = {})
# %neg_953 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1112,), kwargs = {})
# %minimum_316 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_949, %neg_953), kwargs = {})
# %neg_951 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1110,), kwargs = {})
# %minimum_317 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_316, %neg_951), kwargs = {})
# %neg_961 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1122,), kwargs = {})
# %neg_965 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1126,), kwargs = {})
# %minimum_320 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_961, %neg_965), kwargs = {})
# %neg_963 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1124,), kwargs = {})
# %minimum_321 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_320, %neg_963), kwargs = {})
# %neg_973 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1136,), kwargs = {})
# %neg_977 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1140,), kwargs = {})
# %minimum_324 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_973, %neg_977), kwargs = {})
# %neg_975 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1138,), kwargs = {})
# %minimum_325 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_324, %neg_975), kwargs = {})
# %neg_985 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1150,), kwargs = {})
# %neg_989 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1154,), kwargs = {})
# %minimum_328 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_985, %neg_989), kwargs = {})
# %neg_987 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1152,), kwargs = {})
# %minimum_329 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_328, %neg_987), kwargs = {})
# %neg_997 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1164,), kwargs = {})
# %neg_1001 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1168,), kwargs = {})
# %minimum_332 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_997, %neg_1001), kwargs = {})
# %neg_999 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1166,), kwargs = {})
# %minimum_333 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_332, %neg_999), kwargs = {})
# %neg_1009 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1178,), kwargs = {})
# %neg_1013 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1182,), kwargs = {})
# %minimum_336 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1009, %neg_1013), kwargs = {})
# %neg_1011 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1180,), kwargs = {})
# %minimum_337 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_336, %neg_1011), kwargs = {})
# %neg_1021 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1192,), kwargs = {})
# %neg_1025 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1196,), kwargs = {})
# %minimum_340 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1021, %neg_1025), kwargs = {})
# %neg_1023 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1194,), kwargs = {})
# %minimum_341 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_340, %neg_1023), kwargs = {})
# %neg_1033 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1206,), kwargs = {})
# %neg_1037 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1210,), kwargs = {})
# %minimum_344 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1033, %neg_1037), kwargs = {})
# %neg_1035 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1208,), kwargs = {})
# %minimum_345 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_344, %neg_1035), kwargs = {})
# %neg_1045 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1220,), kwargs = {})
# %neg_1049 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1224,), kwargs = {})
# %minimum_348 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1045, %neg_1049), kwargs = {})
# %neg_1047 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1222,), kwargs = {})
# %minimum_349 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_348, %neg_1047), kwargs = {})
# %neg_1057 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1234,), kwargs = {})
# %neg_1061 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1238,), kwargs = {})
# %minimum_352 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1057, %neg_1061), kwargs = {})
# %neg_1059 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1236,), kwargs = {})
# %minimum_353 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_352, %neg_1059), kwargs = {})
# %neg_1069 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1248,), kwargs = {})
# %neg_1073 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1252,), kwargs = {})
# %minimum_356 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1069, %neg_1073), kwargs = {})
# %neg_1071 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1250,), kwargs = {})
# %minimum_357 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_356, %neg_1071), kwargs = {})
# %neg_1081 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1262,), kwargs = {})
# %neg_1085 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1266,), kwargs = {})
# %minimum_360 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1081, %neg_1085), kwargs = {})
# %neg_1083 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1264,), kwargs = {})
# %minimum_361 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_360, %neg_1083), kwargs = {})
# %neg_1093 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1276,), kwargs = {})
# %neg_1097 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1280,), kwargs = {})
# %minimum_364 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1093, %neg_1097), kwargs = {})
# %neg_1095 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1278,), kwargs = {})
# %minimum_365 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_364, %neg_1095), kwargs = {})
# %neg_1105 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1290,), kwargs = {})
# %neg_1109 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1294,), kwargs = {})
# %minimum_368 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1105, %neg_1109), kwargs = {})
# %neg_1107 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1292,), kwargs = {})
# %minimum_369 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_368, %neg_1107), kwargs = {})
# %neg_1117 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1304,), kwargs = {})
# %neg_1121 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1308,), kwargs = {})
# %minimum_372 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1117, %neg_1121), kwargs = {})
# %neg_1119 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1306,), kwargs = {})
# %minimum_373 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_372, %neg_1119), kwargs = {})
# %neg_1129 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1318,), kwargs = {})
# %neg_1133 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1322,), kwargs = {})
# %minimum_376 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1129, %neg_1133), kwargs = {})
# %neg_1131 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1320,), kwargs = {})
# %minimum_377 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_376, %neg_1131), kwargs = {})
# %neg_1141 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1332,), kwargs = {})
# %neg_1145 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1336,), kwargs = {})
# %minimum_380 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1141, %neg_1145), kwargs = {})
# %neg_1143 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1334,), kwargs = {})
# %minimum_381 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_380, %neg_1143), kwargs = {})
# %neg_1153 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1346,), kwargs = {})
# %neg_1157 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1350,), kwargs = {})
# %minimum_384 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1153, %neg_1157), kwargs = {})
# %neg_1155 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1348,), kwargs = {})
# %minimum_385 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_384, %neg_1155), kwargs = {})
# %neg_1165 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1360,), kwargs = {})
# %neg_1169 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1364,), kwargs = {})
# %minimum_388 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1165, %neg_1169), kwargs = {})
# %neg_1167 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1362,), kwargs = {})
# %minimum_389 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_388, %neg_1167), kwargs = {})
# %neg_1177 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1374,), kwargs = {})
# %neg_1181 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1378,), kwargs = {})
# %minimum_392 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1177, %neg_1181), kwargs = {})
# %neg_1179 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1376,), kwargs = {})
# %minimum_393 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_392, %neg_1179), kwargs = {})
# %neg_1189 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1388,), kwargs = {})
# %neg_1193 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1392,), kwargs = {})
# %minimum_396 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1189, %neg_1193), kwargs = {})
# %neg_1191 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1390,), kwargs = {})
# %minimum_397 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_396, %neg_1191), kwargs = {})
# %neg_1201 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1402,), kwargs = {})
# %neg_1205 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1406,), kwargs = {})
# %minimum_400 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_1201, %neg_1205), kwargs = {})
# %neg_1203 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_1404,), kwargs = {})
# %minimum_401 : [num_users=4] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_400, %neg_1203), kwargs = {})
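# Note: each `minimum(minimum(%neg_a, %neg_b), %neg_c)` chain above applies the
# identity min(-a, -b, -c) = -max(a, b, c), so the graph realizes a negated
# three-way max over pooled `getitem_*` operands using only `neg` and `minimum`.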
# %sub_101 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %getitem_714), kwargs = {})
# %relu_101 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_101,), kwargs = {})
# %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_205, %getitem_728), kwargs = {})
# %relu_102 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_102,), kwargs = {})
# %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_101, %relu_102), kwargs = {})
# %sub_103 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_102, %mul_50), kwargs = {})
# %relu_103 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_103,), kwargs = {})
# %add_50 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_101, %relu_103), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_209, %getitem_742), kwargs = {})
# %relu_104 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_104,), kwargs = {})
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_50, %relu_104), kwargs = {})
# %sub_105 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_104, %mul_51), kwargs = {})
# %relu_105 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_105,), kwargs = {})
# %add_51 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_50, %relu_105), kwargs = {})
# %sub_106 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_213, %getitem_756), kwargs = {})
# %relu_106 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_106,), kwargs = {})
# %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_51, %relu_106), kwargs = {})
# %sub_107 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_106, %mul_52), kwargs = {})
# %relu_107 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_107,), kwargs = {})
# %add_52 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_51, %relu_107), kwargs = {})
# %sub_108 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_217, %getitem_770), kwargs = {})
# %relu_108 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_108,), kwargs = {})
# %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_52, %relu_108), kwargs = {})
# %sub_109 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_108, %mul_53), kwargs = {})
# %relu_109 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_109,), kwargs = {})
# %add_53 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_52, %relu_109), kwargs = {})
# %sub_110 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_221, %getitem_784), kwargs = {})
# %relu_110 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_110,), kwargs = {})
# %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_53, %relu_110), kwargs = {})
# %sub_111 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_110, %mul_54), kwargs = {})
# %relu_111 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_111,), kwargs = {})
# %add_54 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_53, %relu_111), kwargs = {})
# %sub_112 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_225, %getitem_798), kwargs = {})
# %relu_112 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_112,), kwargs = {})
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_54, %relu_112), kwargs = {})
# %sub_113 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_112, %mul_55), kwargs = {})
# %relu_113 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_113,), kwargs = {})
# %add_55 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_54, %relu_113), kwargs = {})
# %sub_114 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_229, %getitem_812), kwargs = {})
# %relu_114 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_114,), kwargs = {})
# %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_55, %relu_114), kwargs = {})
# %sub_115 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_114, %mul_56), kwargs = {})
# %relu_115 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_115,), kwargs = {})
# %add_56 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_55, %relu_115), kwargs = {})
# %sub_116 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_233, %getitem_826), kwargs = {})
# %relu_116 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_116,), kwargs = {})
# %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_56, %relu_116), kwargs = {})
# %sub_117 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_116, %mul_57), kwargs = {})
# %relu_117 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_117,), kwargs = {})
# %add_57 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_56, %relu_117), kwargs = {})
# %sub_118 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_237, %getitem_840), kwargs = {})
# %relu_118 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_118,), kwargs = {})
# %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_57, %relu_118), kwargs = {})
# %sub_119 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_118, %mul_58), kwargs = {})
# %relu_119 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_119,), kwargs = {})
# %add_58 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_57, %relu_119), kwargs = {})
# %sub_120 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_241, %getitem_854), kwargs = {})
# %relu_120 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_120,), kwargs = {})
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_58, %relu_120), kwargs = {})
# %sub_121 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_120, %mul_59), kwargs = {})
# %relu_121 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_121,), kwargs = {})
# %add_59 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_58, %relu_121), kwargs = {})
# %sub_122 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_245, %getitem_868), kwargs = {})
# %relu_122 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_122,), kwargs = {})
# %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_59, %relu_122), kwargs = {})
# %sub_123 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_122, %mul_60), kwargs = {})
# %relu_123 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_123,), kwargs = {})
# %add_60 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_59, %relu_123), kwargs = {})
# %sub_124 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_249, %getitem_882), kwargs = {})
# %relu_124 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_124,), kwargs = {})
# %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_60, %relu_124), kwargs = {})
# %sub_125 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_124, %mul_61), kwargs = {})
# %relu_125 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_125,), kwargs = {})
# %add_61 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_60, %relu_125), kwargs = {})
# %sub_126 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_253, %getitem_896), kwargs = {})
# %relu_126 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_126,), kwargs = {})
# %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_61, %relu_126), kwargs = {})
# %sub_127 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_126, %mul_62), kwargs = {})
# %relu_127 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_127,), kwargs = {})
# %add_62 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_61, %relu_127), kwargs = {})
# %sub_128 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_257, %getitem_910), kwargs = {})
# %relu_128 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_128,), kwargs = {})
# %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_62, %relu_128), kwargs = {})
# %sub_129 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_128, %mul_63), kwargs = {})
# %relu_129 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_129,), kwargs = {})
# %add_63 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_62, %relu_129), kwargs = {})
# %sub_130 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_261, %getitem_924), kwargs = {})
# %relu_130 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_130,), kwargs = {})
# %mul_64 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_63, %relu_130), kwargs = {})
# %sub_131 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_130, %mul_64), kwargs = {})
# %relu_131 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_131,), kwargs = {})
# %add_64 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_63, %relu_131), kwargs = {})
# %sub_132 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_265, %getitem_938), kwargs = {})
# %relu_132 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_132,), kwargs = {})
# %mul_65 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_64, %relu_132), kwargs = {})
# %sub_133 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_132, %mul_65), kwargs = {})
# %relu_133 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_133,), kwargs = {})
# %add_65 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_64, %relu_133), kwargs = {})
# %sub_134 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_269, %getitem_952), kwargs = {})
# %relu_134 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_134,), kwargs = {})
# %mul_66 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_65, %relu_134), kwargs = {})
# %sub_135 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_134, %mul_66), kwargs = {})
# %relu_135 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_135,), kwargs = {})
# %add_66 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_65, %relu_135), kwargs = {})
# %sub_136 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_273, %getitem_966), kwargs = {})
# %relu_136 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_136,), kwargs = {})
# %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_66, %relu_136), kwargs = {})
# %sub_137 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_136, %mul_67), kwargs = {})
# %relu_137 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_137,), kwargs = {})
# %add_67 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_66, %relu_137), kwargs = {})
# %sub_138 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_277, %getitem_980), kwargs = {})
# %relu_138 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_138,), kwargs = {})
# %mul_68 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_67, %relu_138), kwargs = {})
# %sub_139 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_138, %mul_68), kwargs = {})
# %relu_139 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_139,), kwargs = {})
# %add_68 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_67, %relu_139), kwargs = {})
# %sub_140 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_281, %getitem_994), kwargs = {})
# %relu_140 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_140,), kwargs = {})
# %mul_69 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_68, %relu_140), kwargs = {})
# %sub_141 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_140, %mul_69), kwargs = {})
# %relu_141 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_141,), kwargs = {})
# %add_69 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_68, %relu_141), kwargs = {})
# %sub_142 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_285, %getitem_1008), kwargs = {})
# %relu_142 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_142,), kwargs = {})
# %mul_70 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_69, %relu_142), kwargs = {})
# %sub_143 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_142, %mul_70), kwargs = {})
# %relu_143 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_143,), kwargs = {})
# %add_70 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_69, %relu_143), kwargs = {})
# %sub_144 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_289, %getitem_1022), kwargs = {})
# %relu_144 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_144,), kwargs = {})
# %mul_71 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_70, %relu_144), kwargs = {})
# %sub_145 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_144, %mul_71), kwargs = {})
# %relu_145 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_145,), kwargs = {})
# %add_71 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_70, %relu_145), kwargs = {})
# %sub_146 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_293, %getitem_1036), kwargs = {})
# %relu_146 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_146,), kwargs = {})
# %mul_72 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_71, %relu_146), kwargs = {})
# %sub_147 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_146, %mul_72), kwargs = {})
# %relu_147 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_147,), kwargs = {})
# %add_72 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_71, %relu_147), kwargs = {})
# %sub_148 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_297, %getitem_1050), kwargs = {})
# %relu_148 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_148,), kwargs = {})
# %mul_73 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_72, %relu_148), kwargs = {})
# %sub_149 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_148, %mul_73), kwargs = {})
# %relu_149 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_149,), kwargs = {})
# %add_73 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_72, %relu_149), kwargs = {})
# %sub_150 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_301, %getitem_1064), kwargs = {})
# %relu_150 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_150,), kwargs = {})
# %mul_74 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_73, %relu_150), kwargs = {})
# %sub_151 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_150, %mul_74), kwargs = {})
# %relu_151 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_151,), kwargs = {})
# %add_74 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_73, %relu_151), kwargs = {})
# %sub_152 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_305, %getitem_1078), kwargs = {})
# %relu_152 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_152,), kwargs = {})
# %mul_75 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_74, %relu_152), kwargs = {})
# %sub_153 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_152, %mul_75), kwargs = {})
# %relu_153 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_153,), kwargs = {})
# %add_75 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_74, %relu_153), kwargs = {})
# %sub_154 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_309, %getitem_1092), kwargs = {})
# %relu_154 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_154,), kwargs = {})
# %mul_76 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_75, %relu_154), kwargs = {})
# %sub_155 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_154, %mul_76), kwargs = {})
# %relu_155 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_155,), kwargs = {})
# %add_76 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_75, %relu_155), kwargs = {})
# %sub_156 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_313, %getitem_1106), kwargs = {})
# %relu_156 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_156,), kwargs = {})
# %mul_77 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_76, %relu_156), kwargs = {})
# %sub_157 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_156, %mul_77), kwargs = {})
# %relu_157 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_157,), kwargs = {})
# %add_77 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_76, %relu_157), kwargs = {})
# %sub_158 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_317, %getitem_1120), kwargs = {})
# %relu_158 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_158,), kwargs = {})
# %mul_78 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_77, %relu_158), kwargs = {})
# %sub_159 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_158, %mul_78), kwargs = {})
# %relu_159 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_159,), kwargs = {})
# %add_78 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_77, %relu_159), kwargs = {})
# %sub_160 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_321, %getitem_1134), kwargs = {})
# %relu_160 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_160,), kwargs = {})
# %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_78, %relu_160), kwargs = {})
# %sub_161 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_160, %mul_79), kwargs = {})
# %relu_161 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_161,), kwargs = {})
# %add_79 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_78, %relu_161), kwargs = {})
# %sub_162 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_325, %getitem_1148), kwargs = {})
# %relu_162 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_162,), kwargs = {})
# %mul_80 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_79, %relu_162), kwargs = {})
# %sub_163 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_162, %mul_80), kwargs = {})
# %relu_163 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_163,), kwargs = {})
# %add_80 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_79, %relu_163), kwargs = {})
# %sub_164 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_329, %getitem_1162), kwargs = {})
# %relu_164 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_164,), kwargs = {})
# %mul_81 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_80, %relu_164), kwargs = {})
# %sub_165 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_164, %mul_81), kwargs = {})
# %relu_165 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_165,), kwargs = {})
# %add_81 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_80, %relu_165), kwargs = {})
# %sub_166 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_333, %getitem_1176), kwargs = {})
# %relu_166 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_166,), kwargs = {})
# %mul_82 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_81, %relu_166), kwargs = {})
# %sub_167 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_166, %mul_82), kwargs = {})
# %relu_167 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_167,), kwargs = {})
# %add_82 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_81, %relu_167), kwargs = {})
# %sub_168 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_337, %getitem_1190), kwargs = {})
# %relu_168 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_168,), kwargs = {})
# %mul_83 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_82, %relu_168), kwargs = {})
# %sub_169 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_168, %mul_83), kwargs = {})
# %relu_169 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_169,), kwargs = {})
# %add_83 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_82, %relu_169), kwargs = {})
# %sub_170 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_341, %getitem_1204), kwargs = {})
# %relu_170 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_170,), kwargs = {})
# %mul_84 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_83, %relu_170), kwargs = {})
# %sub_171 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_170, %mul_84), kwargs = {})
# %relu_171 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_171,), kwargs = {})
# %add_84 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_83, %relu_171), kwargs = {})
# %sub_172 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_345, %getitem_1218), kwargs = {})
# %relu_172 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_172,), kwargs = {})
# %mul_85 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_84, %relu_172), kwargs = {})
# %sub_173 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_172, %mul_85), kwargs = {})
# %relu_173 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_173,), kwargs = {})
# %add_85 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_84, %relu_173), kwargs = {})
# %sub_174 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_349, %getitem_1232), kwargs = {})
# %relu_174 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_174,), kwargs = {})
# %mul_86 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_85, %relu_174), kwargs = {})
# %sub_175 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_174, %mul_86), kwargs = {})
# %relu_175 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_175,), kwargs = {})
# %add_86 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_85, %relu_175), kwargs = {})
# %sub_176 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_353, %getitem_1246), kwargs = {})
# %relu_176 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_176,), kwargs = {})
# %mul_87 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_86, %relu_176), kwargs = {})
# %sub_177 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_176, %mul_87), kwargs = {})
# %relu_177 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_177,), kwargs = {})
# %add_87 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_86, %relu_177), kwargs = {})
# %sub_178 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_357, %getitem_1260), kwargs = {})
# %relu_178 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_178,), kwargs = {})
# %mul_88 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_87, %relu_178), kwargs = {})
# %sub_179 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_178, %mul_88), kwargs = {})
# %relu_179 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_179,), kwargs = {})
# %add_88 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_87, %relu_179), kwargs = {})
# %sub_180 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_361, %getitem_1274), kwargs = {})
# %relu_180 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_180,), kwargs = {})
# %mul_89 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_88, %relu_180), kwargs = {})
# %sub_181 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_180, %mul_89), kwargs = {})
# %relu_181 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_181,), kwargs = {})
# %add_89 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_88, %relu_181), kwargs = {})
# %sub_182 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_365, %getitem_1288), kwargs = {})
# %relu_182 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_182,), kwargs = {})
# %mul_90 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_89, %relu_182), kwargs = {})
# %sub_183 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_182, %mul_90), kwargs = {})
# %relu_183 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_183,), kwargs = {})
# %add_90 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_89, %relu_183), kwargs = {})
# %sub_184 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_369, %getitem_1302), kwargs = {})
# %relu_184 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_184,), kwargs = {})
# %mul_91 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_90, %relu_184), kwargs = {})
# %sub_185 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_184, %mul_91), kwargs = {})
# %relu_185 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_185,), kwargs = {})
# %add_91 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_90, %relu_185), kwargs = {})
# %sub_186 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_373, %getitem_1316), kwargs = {})
# %relu_186 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_186,), kwargs = {})
# %mul_92 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_91, %relu_186), kwargs = {})
# %sub_187 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_186, %mul_92), kwargs = {})
# %relu_187 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_187,), kwargs = {})
# %add_92 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_91, %relu_187), kwargs = {})
# %sub_188 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_377, %getitem_1330), kwargs = {})
# %relu_188 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_188,), kwargs = {})
# %mul_93 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_92, %relu_188), kwargs = {})
# %sub_189 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_188, %mul_93), kwargs = {})
# %relu_189 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_189,), kwargs = {})
# %add_93 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_92, %relu_189), kwargs = {})
# %sub_190 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_381, %getitem_1344), kwargs = {})
# %relu_190 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_190,), kwargs = {})
# %mul_94 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_93, %relu_190), kwargs = {})
# %sub_191 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_190, %mul_94), kwargs = {})
# %relu_191 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_191,), kwargs = {})
# %add_94 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_93, %relu_191), kwargs = {})
# %sub_192 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_385, %getitem_1358), kwargs = {})
# %relu_192 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_192,), kwargs = {})
# %mul_95 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_94, %relu_192), kwargs = {})
# %sub_193 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_192, %mul_95), kwargs = {})
# %relu_193 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_193,), kwargs = {})
# %add_95 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_94, %relu_193), kwargs = {})
# %sub_194 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_389, %getitem_1372), kwargs = {})
# %relu_194 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_194,), kwargs = {})
# %mul_96 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_95, %relu_194), kwargs = {})
# %sub_195 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_194, %mul_96), kwargs = {})
# %relu_195 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_195,), kwargs = {})
# %add_96 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_95, %relu_195), kwargs = {})
# %sub_196 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_393, %getitem_1386), kwargs = {})
# %relu_196 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_196,), kwargs = {})
# %mul_97 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_96, %relu_196), kwargs = {})
# %sub_197 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_196, %mul_97), kwargs = {})
# %relu_197 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_197,), kwargs = {})
# %add_97 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_96, %relu_197), kwargs = {})
# %sub_198 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_397, %getitem_1400), kwargs = {})
# %relu_198 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_198,), kwargs = {})
# %mul_98 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_97, %relu_198), kwargs = {})
# %sub_199 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_198, %mul_98), kwargs = {})
# %relu_199 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_199,), kwargs = {})
# %add_98 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_97, %relu_199), kwargs = {})
# %sub_200 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_401, %getitem_1414), kwargs = {})
# %relu_200 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_200,), kwargs = {})
# %mul_99 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_98, %relu_200), kwargs = {})
# %sub_201 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_200, %mul_99), kwargs = {})
# %relu_201 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_201,), kwargs = {})
# %add_99 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_98, %relu_201), kwargs = {})
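# The fragment list above repeats a single elementwise recurrence: with
# r_k = relu(m_k - p_k) (m_k a negated-max term, p_k a pooled slice), the
# accumulator is updated as acc <- acc + relu(r_k - acc * r_k)
# = acc + relu(r_k * (1 - acc)). For values in [0, 1] this is the
# probabilistic sum acc + r_k - acc * r_k, i.e. a soft union of the r_k.
# A minimal eager-mode sketch of one update step, assuming inputs in [0, 1];
# the function and argument names are illustrative and not part of the graph:
def _soft_union_step_reference(acc, r):
    # Mirrors one %sub_*/%relu_*/%mul_*/%add_* group above; the outer relu keeps
    # the increment non-negative even if inputs stray outside [0, 1].
    return acc + torch.relu(r - acc * r)
# Example: acc = _soft_union_step_reference(torch.zeros(4, 4), torch.rand(4, 4))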
triton_poi_fused_add_minimum_mul_neg_relu_sub_5 = async_compile.triton('triton_poi_fused_add_minimum_mul_neg_relu_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: '*fp32', 68: '*fp32', 69: '*fp32', 70: '*fp32', 71: '*fp32', 72: '*fp32', 73: '*fp32', 74: '*fp32', 75: '*fp32', 76: '*fp32', 77: '*fp32', 78: '*fp32', 79: '*fp32', 80: '*fp32', 81: '*fp32', 82: '*fp32', 83: '*fp32', 84: '*fp32', 85: '*fp32', 86: '*fp32', 87: '*fp32', 88: '*fp32', 89: '*fp32', 90: '*fp32', 91: '*fp32', 92: '*fp32', 93: '*fp32', 94: '*fp32', 95: '*fp32', 96: '*fp32', 97: '*fp32', 98: '*fp32', 99: '*fp32', 100: '*fp32', 101: '*fp32', 102: '*fp32', 103: '*fp32', 104: '*fp32', 105: '*fp32', 106: '*fp32', 107: '*fp32', 108: '*fp32', 109: '*fp32', 110: '*fp32', 111: '*fp32', 112: '*fp32', 113: '*fp32', 114: '*fp32', 115: '*fp32', 116: '*fp32', 117: '*fp32', 118: '*fp32', 119: '*fp32', 120: '*fp32', 121: '*fp32', 122: '*fp32', 123: '*fp32', 124: '*fp32', 125: '*fp32', 126: '*fp32', 127: '*fp32', 128: '*fp32', 129: '*fp32', 130: '*fp32', 131: '*fp32', 132: '*fp32', 133: '*fp32', 134: '*fp32', 135: '*fp32', 136: '*fp32', 137: '*fp32', 138: '*fp32', 139: '*fp32', 140: '*fp32', 141: '*fp32', 142: '*fp32', 143: '*fp32', 144: '*fp32', 145: '*fp32', 146: '*fp32', 147: '*fp32', 148: '*fp32', 149: '*fp32', 150: '*fp32', 151: '*fp32', 152: '*fp32', 153: '*fp32', 154: '*fp32', 155: '*fp32', 156: '*fp32', 157: '*fp32', 158: '*fp32', 159: '*fp32', 160: '*fp32', 161: '*fp32', 162: '*fp32', 163: '*fp32', 164: '*fp32', 165: '*fp32', 166: '*fp32', 167: '*fp32', 168: '*fp32', 169: '*fp32', 170: '*fp32', 171: '*fp32', 172: '*fp32', 173: '*fp32', 174: '*fp32', 175: '*fp32', 176: '*fp32', 177: '*fp32', 178: '*fp32', 179: '*fp32', 180: '*fp32', 181: '*fp32', 182: '*fp32', 183: '*fp32', 184: '*fp32', 185: '*fp32', 186: '*fp32', 187: '*fp32', 188: '*fp32', 189: '*fp32', 190: '*fp32', 191: '*fp32', 192: '*fp32', 193: '*fp32', 194: '*fp32', 195: '*fp32', 196: '*fp32', 197: '*fp32', 198: '*fp32', 199: '*fp32', 200: '*fp32', 201: '*fp32', 202: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_minimum_mul_neg_relu_sub_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr10', 'in_out_ptr11', 'in_out_ptr12', 'in_out_ptr13', 'in_out_ptr14', 'in_out_ptr15', 'in_out_ptr16', 'in_out_ptr17', 'in_out_ptr18', 'in_out_ptr19', 'in_out_ptr2', 'in_out_ptr20', 'in_out_ptr21', 'in_out_ptr22', 'in_out_ptr23', 'in_out_ptr24', 'in_out_ptr25', 'in_out_ptr26', 'in_out_ptr27', 'in_out_ptr28', 'in_out_ptr29', 'in_out_ptr3', 'in_out_ptr30', 'in_out_ptr31', 'in_out_ptr32', 'in_out_ptr33', 'in_out_ptr34', 'in_out_ptr35', 'in_out_ptr36', 'in_out_ptr37', 'in_out_ptr38', 'in_out_ptr39', 'in_out_ptr4', 'in_out_ptr40', 'in_out_ptr41', 'in_out_ptr42', 'in_out_ptr43', 'in_out_ptr44', 'in_out_ptr45', 'in_out_ptr46', 'in_out_ptr47', 'in_out_ptr48', 'in_out_ptr49', 'in_out_ptr5', 'in_out_ptr6', 'in_out_ptr7', 'in_out_ptr8', 'in_out_ptr9'], 'no_x_dim': False, 'num_load': 202, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
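    # The 50 `in_out_ptr*` entries in mutated_arg_names above are the running
    # accumulators (%add_50 ... %add_99); each is read and overwritten in place.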
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_5(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5, in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10, in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15, in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20, in_out_ptr21, in_out_ptr22, in_out_ptr23, in_out_ptr24, in_out_ptr25, in_out_ptr26, in_out_ptr27, in_out_ptr28, in_out_ptr29, in_out_ptr30, in_out_ptr31, in_out_ptr32, in_out_ptr33, in_out_ptr34, in_out_ptr35, in_out_ptr36, in_out_ptr37, in_out_ptr38, in_out_ptr39, in_out_ptr40, in_out_ptr41, in_out_ptr42, in_out_ptr43, in_out_ptr44, in_out_ptr45, in_out_ptr46, in_out_ptr47, in_out_ptr48, in_out_ptr49, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, in_ptr64, in_ptr65, in_ptr66, in_ptr67, in_ptr68, in_ptr69, in_ptr70, in_ptr71, in_ptr72, in_ptr73, in_ptr74, in_ptr75, in_ptr76, in_ptr77, in_ptr78, in_ptr79, in_ptr80, in_ptr81, in_ptr82, in_ptr83, in_ptr84, in_ptr85, in_ptr86, in_ptr87, in_ptr88, in_ptr89, in_ptr90, in_ptr91, in_ptr92, in_ptr93, in_ptr94, in_ptr95, in_ptr96, in_ptr97, in_ptr98, in_ptr99, in_ptr100, in_ptr101, in_ptr102, in_ptr103, in_ptr104, in_ptr105, in_ptr106, in_ptr107, in_ptr108, in_ptr109, in_ptr110, in_ptr111, in_ptr112, in_ptr113, in_ptr114, in_ptr115, in_ptr116, in_ptr117, in_ptr118, in_ptr119, in_ptr120, in_ptr121, in_ptr122, in_ptr123, in_ptr124, in_ptr125, in_ptr126, in_ptr127, in_ptr128, in_ptr129, in_ptr130, in_ptr131, in_ptr132, in_ptr133, in_ptr134, in_ptr135, in_ptr136, in_ptr137, in_ptr138, in_ptr139, in_ptr140, in_ptr141, in_ptr142, in_ptr143, in_ptr144, in_ptr145, in_ptr146, in_ptr147, in_ptr148, in_ptr149, in_ptr150, in_ptr151, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
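    # Purely pointwise over xnumel = 256 flattened elements; after the first
    # group, each stage loads one running accumulator (in_out_ptrK) plus three
    # stage inputs (in_ptr*), applies the recurrence, and stores back in place.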
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp5 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp10 = tl.load(in_ptr3 + (x0), xmask)
tmp13 = tl.load(in_ptr4 + (x0), xmask)
tmp20 = tl.load(in_out_ptr1 + (x0), xmask)
tmp22 = tl.load(in_ptr5 + (x0), xmask)
tmp25 = tl.load(in_ptr6 + (x0), xmask)
tmp28 = tl.load(in_ptr7 + (x0), xmask)
tmp35 = tl.load(in_out_ptr2 + (x0), xmask)
tmp37 = tl.load(in_ptr8 + (x0), xmask)
tmp40 = tl.load(in_ptr9 + (x0), xmask)
tmp43 = tl.load(in_ptr10 + (x0), xmask)
tmp50 = tl.load(in_out_ptr3 + (x0), xmask)
tmp52 = tl.load(in_ptr11 + (x0), xmask)
tmp55 = tl.load(in_ptr12 + (x0), xmask)
tmp58 = tl.load(in_ptr13 + (x0), xmask)
tmp65 = tl.load(in_out_ptr4 + (x0), xmask)
tmp67 = tl.load(in_ptr14 + (x0), xmask)
tmp70 = tl.load(in_ptr15 + (x0), xmask)
tmp73 = tl.load(in_ptr16 + (x0), xmask)
tmp80 = tl.load(in_out_ptr5 + (x0), xmask)
tmp82 = tl.load(in_ptr17 + (x0), xmask)
tmp85 = tl.load(in_ptr18 + (x0), xmask)
tmp88 = tl.load(in_ptr19 + (x0), xmask)
tmp95 = tl.load(in_out_ptr6 + (x0), xmask)
tmp97 = tl.load(in_ptr20 + (x0), xmask)
tmp100 = tl.load(in_ptr21 + (x0), xmask)
tmp103 = tl.load(in_ptr22 + (x0), xmask)
tmp110 = tl.load(in_out_ptr7 + (x0), xmask)
tmp112 = tl.load(in_ptr23 + (x0), xmask)
tmp115 = tl.load(in_ptr24 + (x0), xmask)
tmp118 = tl.load(in_ptr25 + (x0), xmask)
tmp125 = tl.load(in_out_ptr8 + (x0), xmask)
tmp127 = tl.load(in_ptr26 + (x0), xmask)
tmp130 = tl.load(in_ptr27 + (x0), xmask)
tmp133 = tl.load(in_ptr28 + (x0), xmask)
tmp140 = tl.load(in_out_ptr9 + (x0), xmask)
tmp142 = tl.load(in_ptr29 + (x0), xmask)
tmp145 = tl.load(in_ptr30 + (x0), xmask)
tmp148 = tl.load(in_ptr31 + (x0), xmask)
tmp155 = tl.load(in_out_ptr10 + (x0), xmask)
tmp157 = tl.load(in_ptr32 + (x0), xmask)
tmp160 = tl.load(in_ptr33 + (x0), xmask)
tmp163 = tl.load(in_ptr34 + (x0), xmask)
tmp170 = tl.load(in_out_ptr11 + (x0), xmask)
tmp172 = tl.load(in_ptr35 + (x0), xmask)
tmp175 = tl.load(in_ptr36 + (x0), xmask)
tmp178 = tl.load(in_ptr37 + (x0), xmask)
tmp185 = tl.load(in_out_ptr12 + (x0), xmask)
tmp187 = tl.load(in_ptr38 + (x0), xmask)
tmp190 = tl.load(in_ptr39 + (x0), xmask)
tmp193 = tl.load(in_ptr40 + (x0), xmask)
tmp200 = tl.load(in_out_ptr13 + (x0), xmask)
tmp202 = tl.load(in_ptr41 + (x0), xmask)
tmp205 = tl.load(in_ptr42 + (x0), xmask)
tmp208 = tl.load(in_ptr43 + (x0), xmask)
tmp215 = tl.load(in_out_ptr14 + (x0), xmask)
tmp217 = tl.load(in_ptr44 + (x0), xmask)
tmp220 = tl.load(in_ptr45 + (x0), xmask)
tmp223 = tl.load(in_ptr46 + (x0), xmask)
tmp230 = tl.load(in_out_ptr15 + (x0), xmask)
tmp232 = tl.load(in_ptr47 + (x0), xmask)
tmp235 = tl.load(in_ptr48 + (x0), xmask)
tmp238 = tl.load(in_ptr49 + (x0), xmask)
tmp245 = tl.load(in_out_ptr16 + (x0), xmask)
tmp247 = tl.load(in_ptr50 + (x0), xmask)
tmp250 = tl.load(in_ptr51 + (x0), xmask)
tmp253 = tl.load(in_ptr52 + (x0), xmask)
tmp260 = tl.load(in_out_ptr17 + (x0), xmask)
tmp262 = tl.load(in_ptr53 + (x0), xmask)
tmp265 = tl.load(in_ptr54 + (x0), xmask)
tmp268 = tl.load(in_ptr55 + (x0), xmask)
tmp275 = tl.load(in_out_ptr18 + (x0), xmask)
tmp277 = tl.load(in_ptr56 + (x0), xmask)
tmp280 = tl.load(in_ptr57 + (x0), xmask)
tmp283 = tl.load(in_ptr58 + (x0), xmask)
tmp290 = tl.load(in_out_ptr19 + (x0), xmask)
tmp292 = tl.load(in_ptr59 + (x0), xmask)
tmp295 = tl.load(in_ptr60 + (x0), xmask)
tmp298 = tl.load(in_ptr61 + (x0), xmask)
tmp305 = tl.load(in_out_ptr20 + (x0), xmask)
tmp307 = tl.load(in_ptr62 + (x0), xmask)
tmp310 = tl.load(in_ptr63 + (x0), xmask)
tmp313 = tl.load(in_ptr64 + (x0), xmask)
tmp320 = tl.load(in_out_ptr21 + (x0), xmask)
tmp322 = tl.load(in_ptr65 + (x0), xmask)
tmp325 = tl.load(in_ptr66 + (x0), xmask)
tmp328 = tl.load(in_ptr67 + (x0), xmask)
tmp335 = tl.load(in_out_ptr22 + (x0), xmask)
tmp337 = tl.load(in_ptr68 + (x0), xmask)
tmp340 = tl.load(in_ptr69 + (x0), xmask)
tmp343 = tl.load(in_ptr70 + (x0), xmask)
tmp350 = tl.load(in_out_ptr23 + (x0), xmask)
tmp352 = tl.load(in_ptr71 + (x0), xmask)
tmp355 = tl.load(in_ptr72 + (x0), xmask)
tmp358 = tl.load(in_ptr73 + (x0), xmask)
tmp365 = tl.load(in_out_ptr24 + (x0), xmask)
tmp367 = tl.load(in_ptr74 + (x0), xmask)
tmp370 = tl.load(in_ptr75 + (x0), xmask)
tmp373 = tl.load(in_ptr76 + (x0), xmask)
tmp380 = tl.load(in_out_ptr25 + (x0), xmask)
tmp382 = tl.load(in_ptr77 + (x0), xmask)
tmp385 = tl.load(in_ptr78 + (x0), xmask)
tmp388 = tl.load(in_ptr79 + (x0), xmask)
tmp395 = tl.load(in_out_ptr26 + (x0), xmask)
tmp397 = tl.load(in_ptr80 + (x0), xmask)
tmp400 = tl.load(in_ptr81 + (x0), xmask)
tmp403 = tl.load(in_ptr82 + (x0), xmask)
tmp410 = tl.load(in_out_ptr27 + (x0), xmask)
tmp412 = tl.load(in_ptr83 + (x0), xmask)
tmp415 = tl.load(in_ptr84 + (x0), xmask)
tmp418 = tl.load(in_ptr85 + (x0), xmask)
tmp425 = tl.load(in_out_ptr28 + (x0), xmask)
tmp427 = tl.load(in_ptr86 + (x0), xmask)
tmp430 = tl.load(in_ptr87 + (x0), xmask)
tmp433 = tl.load(in_ptr88 + (x0), xmask)
tmp440 = tl.load(in_out_ptr29 + (x0), xmask)
tmp442 = tl.load(in_ptr89 + (x0), xmask)
tmp445 = tl.load(in_ptr90 + (x0), xmask)
tmp448 = tl.load(in_ptr91 + (x0), xmask)
tmp455 = tl.load(in_out_ptr30 + (x0), xmask)
tmp457 = tl.load(in_ptr92 + (x0), xmask)
tmp460 = tl.load(in_ptr93 + (x0), xmask)
tmp463 = tl.load(in_ptr94 + (x0), xmask)
tmp470 = tl.load(in_out_ptr31 + (x0), xmask)
tmp472 = tl.load(in_ptr95 + (x0), xmask)
tmp475 = tl.load(in_ptr96 + (x0), xmask)
tmp478 = tl.load(in_ptr97 + (x0), xmask)
tmp485 = tl.load(in_out_ptr32 + (x0), xmask)
tmp487 = tl.load(in_ptr98 + (x0), xmask)
tmp490 = tl.load(in_ptr99 + (x0), xmask)
tmp493 = tl.load(in_ptr100 + (x0), xmask)
tmp500 = tl.load(in_out_ptr33 + (x0), xmask)
tmp502 = tl.load(in_ptr101 + (x0), xmask)
tmp505 = tl.load(in_ptr102 + (x0), xmask)
tmp508 = tl.load(in_ptr103 + (x0), xmask)
tmp515 = tl.load(in_out_ptr34 + (x0), xmask)
tmp517 = tl.load(in_ptr104 + (x0), xmask)
tmp520 = tl.load(in_ptr105 + (x0), xmask)
tmp523 = tl.load(in_ptr106 + (x0), xmask)
tmp530 = tl.load(in_out_ptr35 + (x0), xmask)
tmp532 = tl.load(in_ptr107 + (x0), xmask)
tmp535 = tl.load(in_ptr108 + (x0), xmask)
tmp538 = tl.load(in_ptr109 + (x0), xmask)
tmp545 = tl.load(in_out_ptr36 + (x0), xmask)
tmp547 = tl.load(in_ptr110 + (x0), xmask)
tmp550 = tl.load(in_ptr111 + (x0), xmask)
tmp553 = tl.load(in_ptr112 + (x0), xmask)
tmp560 = tl.load(in_out_ptr37 + (x0), xmask)
tmp562 = tl.load(in_ptr113 + (x0), xmask)
tmp565 = tl.load(in_ptr114 + (x0), xmask)
tmp568 = tl.load(in_ptr115 + (x0), xmask)
tmp575 = tl.load(in_out_ptr38 + (x0), xmask)
tmp577 = tl.load(in_ptr116 + (x0), xmask)
tmp580 = tl.load(in_ptr117 + (x0), xmask)
tmp583 = tl.load(in_ptr118 + (x0), xmask)
tmp590 = tl.load(in_out_ptr39 + (x0), xmask)
tmp592 = tl.load(in_ptr119 + (x0), xmask)
tmp595 = tl.load(in_ptr120 + (x0), xmask)
tmp598 = tl.load(in_ptr121 + (x0), xmask)
tmp605 = tl.load(in_out_ptr40 + (x0), xmask)
tmp607 = tl.load(in_ptr122 + (x0), xmask)
tmp610 = tl.load(in_ptr123 + (x0), xmask)
tmp613 = tl.load(in_ptr124 + (x0), xmask)
tmp620 = tl.load(in_out_ptr41 + (x0), xmask)
tmp622 = tl.load(in_ptr125 + (x0), xmask)
tmp625 = tl.load(in_ptr126 + (x0), xmask)
tmp628 = tl.load(in_ptr127 + (x0), xmask)
tmp635 = tl.load(in_out_ptr42 + (x0), xmask)
tmp637 = tl.load(in_ptr128 + (x0), xmask)
tmp640 = tl.load(in_ptr129 + (x0), xmask)
tmp643 = tl.load(in_ptr130 + (x0), xmask)
tmp650 = tl.load(in_out_ptr43 + (x0), xmask)
tmp652 = tl.load(in_ptr131 + (x0), xmask)
tmp655 = tl.load(in_ptr132 + (x0), xmask)
tmp658 = tl.load(in_ptr133 + (x0), xmask)
tmp665 = tl.load(in_out_ptr44 + (x0), xmask)
tmp667 = tl.load(in_ptr134 + (x0), xmask)
tmp670 = tl.load(in_ptr135 + (x0), xmask)
tmp673 = tl.load(in_ptr136 + (x0), xmask)
tmp680 = tl.load(in_out_ptr45 + (x0), xmask)
tmp682 = tl.load(in_ptr137 + (x0), xmask)
tmp685 = tl.load(in_ptr138 + (x0), xmask)
tmp688 = tl.load(in_ptr139 + (x0), xmask)
tmp695 = tl.load(in_out_ptr46 + (x0), xmask)
tmp697 = tl.load(in_ptr140 + (x0), xmask)
tmp700 = tl.load(in_ptr141 + (x0), xmask)
tmp703 = tl.load(in_ptr142 + (x0), xmask)
tmp710 = tl.load(in_out_ptr47 + (x0), xmask)
tmp712 = tl.load(in_ptr143 + (x0), xmask)
tmp715 = tl.load(in_ptr144 + (x0), xmask)
tmp718 = tl.load(in_ptr145 + (x0), xmask)
tmp725 = tl.load(in_out_ptr48 + (x0), xmask)
tmp727 = tl.load(in_ptr146 + (x0), xmask)
tmp730 = tl.load(in_ptr147 + (x0), xmask)
tmp733 = tl.load(in_ptr148 + (x0), xmask)
tmp740 = tl.load(in_out_ptr49 + (x0), xmask)
tmp742 = tl.load(in_ptr149 + (x0), xmask)
tmp745 = tl.load(in_ptr150 + (x0), xmask)
tmp748 = tl.load(in_ptr151 + (x0), xmask)
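# Each 15-op block below consumes one iteration's worth of the loads above:
# three max-pool results are negated and reduced with tl.minimum (a min-pool
# erosion, `img`), the soft-opening is subtracted and clamped with relu to
# form `delta`, and the running skeleton is updated as
#   skel = skel + relu(delta - skel * delta)
# The pattern is unrolled 50 times; the final skeleton is written once at the
# end through in_out_ptr49.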
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
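# tmp19 is the skeleton after the first unrolled update; the 49 blocks below
# repeat the same erode/open/accumulate pattern on fresh loads.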
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
tl.store(in_out_ptr49 + (x0), tmp754, xmask)
''', device_str='cuda')
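
# Reference sketch (an eager-mode assumption, not part of the generated
# output): the fused kernel above unrolls 50 iterations of a clDice-style
# soft-skeletonization loop, as suggested by the p1/p2/p3, img, delta, and
# skel names in the source-node comments. The helpers below are illustrative
# stand-ins for the pooling ops that feed the kernel through the
# in_ptr*/in_out_ptr* arguments; the (3,1,1)/(1,3,1)/(1,1,3) kernel shapes
# are assumed, not recovered from the graph.
import torch.nn.functional as F


def _soft_erode(img):
    # Min-pool written as the negated max-pool of the negated input, matching
    # the tl.minimum(-a, -b) chains in the kernel body.
    p1 = -F.max_pool3d(-img, (3, 1, 1), 1, (1, 0, 0))
    p2 = -F.max_pool3d(-img, (1, 3, 1), 1, (0, 1, 0))
    p3 = -F.max_pool3d(-img, (1, 1, 3), 1, (0, 0, 1))
    # min(min(p1, p3), p2) mirrors minimum_2 / minimum_3 in the graph fragment.
    return torch.min(torch.min(p1, p3), p2)


def _soft_dilate(img):
    return F.max_pool3d(img, (3, 3, 3), 1, (1, 1, 1))


def _soft_open(img):
    return _soft_dilate(_soft_erode(img))


def _soft_skel_reference(img, n_iter=50):
    # Mirrors the unrolled tmp16/tmp17/tmp18/tmp19 pattern of the fused kernel.
    skel = F.relu(img - _soft_open(img))
    for _ in range(n_iter):
        img = _soft_erode(img)
        delta = F.relu(img - _soft_open(img))
        skel = skel + F.relu(delta - skel * delta)
    return skel
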
# kernel path: runs/run_shard_6/inductor_cache/lj/clj4linx7ot7tqznp76xkrp65jwk2xskp6l567hzs75w3qtedynq.py
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, p1_3, p3_3, min_7, p2_3, img_1, p1_5, p3_5, min_11, p2_5, img_2, p1_7, p3_7, min_15, p2_7, img_3, p1_9, p3_9, min_19, p2_9, img_4, p1_11, p3_11, min_23, p2_11, img_5, p1_13, p3_13, min_27, p2_13, img_6, p1_15, p3_15, min_31, p2_15, img_7, p1_17, p3_17, min_35, p2_17, img_8, p1_19, p3_19, min_39, p2_19, img_9, p1_21, p3_21, min_43, p2_21, img_10, p1_23, p3_23, min_47, p2_23, img_11, p1_25, p3_25, min_51, p2_25, img_12, p1_27, p3_27, min_55, p2_27, img_13, p1_29, p3_29, min_59, p2_29, img_14, p1_31, p3_31, min_63, p2_31, img_15, p1_33, p3_33, min_67, p2_33, img_16, p1_35, p3_35, min_71, p2_35, img_17, p1_37, p3_37, min_75, p2_37, img_18, p1_39, p3_39, min_79, p2_39, img_19, p1_41, p3_41, min_83, p2_41, img_20, p1_43, p3_43, min_87, p2_43, img_21, p1_45, p3_45, min_91, p2_45, img_22, p1_47, p3_47, min_95, p2_47, img_23, p1_49, p3_49, min_99, p2_49, img_24, p1_51, p3_51, min_103, p2_51, img_25, p1_53, p3_53, min_107, p2_53, img_26, p1_55, p3_55, min_111, p2_55, img_27, p1_57, p3_57, min_115, p2_57, img_28, p1_59, p3_59, min_119, p2_59, img_29, p1_61, p3_61, min_123, p2_61, img_30, p1_63, p3_63, min_127, p2_63, img_31, p1_65, p3_65, min_131, p2_65, img_32, p1_67, p3_67, min_135, p2_67, img_33, p1_69, p3_69, min_139, p2_69, img_34, p1_71, p3_71, min_143, p2_71, img_35, p1_73, p3_73, min_147, p2_73, img_36, p1_75, p3_75, min_151, p2_75, img_37, p1_77, p3_77, min_155, p2_77, img_38, p1_79, p3_79, min_159, p2_79, img_39, p1_81, p3_81, min_163, p2_81, img_40, p1_83, p3_83, min_167, p2_83, img_41, p1_85, p3_85, min_171, p2_85, img_42, p1_87, p3_87, min_175, p2_87, img_43, p1_89, p3_89, min_179, p2_89, img_44, p1_91, p3_91, min_183, p2_91, img_45, p1_93, p3_93, min_187, p2_93, img_46, p1_95, p3_95, min_191, p2_95, img_47, p1_97, p3_97, min_195, p2_97, img_48, p1_99, p3_99, min_199, p2_99, img_49, sub, skel, sub_1, delta, mul, sub_2, relu_2, skel_1, sub_3, delta_1, mul_1, sub_4, relu_4, skel_2, sub_5, delta_2, mul_2, sub_6, relu_6, skel_3, sub_7, delta_3, mul_3, sub_8, relu_8, skel_4, sub_9, delta_4, mul_4, sub_10, relu_10, skel_5, sub_11, delta_5, mul_5, sub_12, relu_12, skel_6, sub_13, delta_6, mul_6, sub_14, relu_14, skel_7, sub_15, delta_7, mul_7, sub_16, relu_16, skel_8, sub_17, delta_8, mul_8, sub_18, relu_18, skel_9, sub_19, delta_9, mul_9, sub_20, relu_20, skel_10, sub_21, delta_10, mul_10, sub_22, relu_22, skel_11, sub_23, delta_11, mul_11, sub_24, relu_24, skel_12, sub_25, delta_12, mul_12, sub_26, relu_26, skel_13, sub_27, delta_13, mul_13, sub_28, relu_28, skel_14, sub_29, delta_14, mul_14, sub_30, relu_30, skel_15, sub_31, delta_15, mul_15, sub_32, relu_32, skel_16, sub_33, delta_16, mul_16, sub_34, relu_34, skel_17, sub_35, delta_17, mul_17, sub_36, relu_36, skel_18, sub_37, delta_18, mul_18, sub_38, relu_38, skel_19, sub_39, delta_19, mul_19, sub_40, relu_40, skel_20, sub_41, delta_20, mul_20, sub_42, relu_42, skel_21, sub_43, delta_21, mul_21, sub_44, relu_44, skel_22, sub_45, delta_22, mul_22, sub_46, relu_46, skel_23, sub_47, delta_23, mul_23, sub_48, relu_48, skel_24, sub_49, delta_24, mul_24, sub_50, relu_50, skel_25, sub_51, delta_25, mul_25, sub_52, relu_52, skel_26, sub_53, delta_26, mul_26, sub_54, relu_54, skel_27, sub_55, delta_27, mul_27, sub_56, relu_56, skel_28, sub_57, delta_28, mul_28, sub_58, relu_58, skel_29, sub_59, delta_29, mul_29, sub_60, relu_60, skel_30, sub_61, delta_30, mul_30, sub_62, relu_62, skel_31, sub_63, delta_31, mul_31, sub_64, relu_64, skel_32, sub_65, delta_32, mul_32, sub_66, 
# relu_66, skel_33, sub_67, delta_33, mul_33, sub_68, relu_68, skel_34, sub_69, delta_34, mul_34, sub_70, relu_70, skel_35, sub_71, delta_35, mul_35, sub_72, relu_72, skel_36, sub_73, delta_36, mul_36, sub_74, relu_74, skel_37, sub_75, delta_37, mul_37, sub_76, relu_76, skel_38, sub_77, delta_38, mul_38, sub_78, relu_78, skel_39, sub_79, delta_39, mul_39, sub_80, relu_80, skel_40, sub_81, delta_40, mul_40, sub_82, relu_82, skel_41, sub_83, delta_41, mul_41, sub_84, relu_84, skel_42, sub_85, delta_42, mul_42, sub_86, relu_86, skel_43, sub_87, delta_43, mul_43, sub_88, relu_88, skel_44, sub_89, delta_44, mul_44, sub_90, relu_90, skel_45, sub_91, delta_45, mul_45, sub_92, relu_92, skel_46, sub_93, delta_46, mul_46, sub_94, relu_94, skel_47, sub_95, delta_47, mul_47, sub_96, relu_96, skel_48, sub_97, delta_48, mul_48, sub_98, relu_98, skel_49, sub_99, delta_49, mul_49, sub_100, relu_100, skel_50], Original ATen: [aten.neg, aten.minimum, aten.sub, aten.relu, aten.mul, aten.add]
# Source node to ATen node mapping:
# delta => relu_1
# delta_1 => relu_3
# delta_10 => relu_21
# delta_11 => relu_23
# delta_12 => relu_25
# delta_13 => relu_27
# delta_14 => relu_29
# delta_15 => relu_31
# delta_16 => relu_33
# delta_17 => relu_35
# delta_18 => relu_37
# delta_19 => relu_39
# delta_2 => relu_5
# delta_20 => relu_41
# delta_21 => relu_43
# delta_22 => relu_45
# delta_23 => relu_47
# delta_24 => relu_49
# delta_25 => relu_51
# delta_26 => relu_53
# delta_27 => relu_55
# delta_28 => relu_57
# delta_29 => relu_59
# delta_3 => relu_7
# delta_30 => relu_61
# delta_31 => relu_63
# delta_32 => relu_65
# delta_33 => relu_67
# delta_34 => relu_69
# delta_35 => relu_71
# delta_36 => relu_73
# delta_37 => relu_75
# delta_38 => relu_77
# delta_39 => relu_79
# delta_4 => relu_9
# delta_40 => relu_81
# delta_41 => relu_83
# delta_42 => relu_85
# delta_43 => relu_87
# delta_44 => relu_89
# delta_45 => relu_91
# delta_46 => relu_93
# delta_47 => relu_95
# delta_48 => relu_97
# delta_49 => relu_99
# delta_5 => relu_11
# delta_6 => relu_13
# delta_7 => relu_15
# delta_8 => relu_17
# delta_9 => relu_19
# img => minimum_3
# img_1 => minimum_7
# img_10 => minimum_43
# img_11 => minimum_47
# img_12 => minimum_51
# img_13 => minimum_55
# img_14 => minimum_59
# img_15 => minimum_63
# img_16 => minimum_67
# img_17 => minimum_71
# img_18 => minimum_75
# img_19 => minimum_79
# img_2 => minimum_11
# img_20 => minimum_83
# img_21 => minimum_87
# img_22 => minimum_91
# img_23 => minimum_95
# img_24 => minimum_99
# img_25 => minimum_103
# img_26 => minimum_107
# img_27 => minimum_111
# img_28 => minimum_115
# img_29 => minimum_119
# img_3 => minimum_15
# img_30 => minimum_123
# img_31 => minimum_127
# img_32 => minimum_131
# img_33 => minimum_135
# img_34 => minimum_139
# img_35 => minimum_143
# img_36 => minimum_147
# img_37 => minimum_151
# img_38 => minimum_155
# img_39 => minimum_159
# img_4 => minimum_19
# img_40 => minimum_163
# img_41 => minimum_167
# img_42 => minimum_171
# img_43 => minimum_175
# img_44 => minimum_179
# img_45 => minimum_183
# img_46 => minimum_187
# img_47 => minimum_191
# img_48 => minimum_195
# img_49 => minimum_199
# img_5 => minimum_23
# img_6 => minimum_27
# img_7 => minimum_31
# img_8 => minimum_35
# img_9 => minimum_39
# min_103 => minimum_102
# min_107 => minimum_106
# min_11 => minimum_10
# min_111 => minimum_110
# min_115 => minimum_114
# min_119 => minimum_118
# min_123 => minimum_122
# min_127 => minimum_126
# min_131 => minimum_130
# min_135 => minimum_134
# min_139 => minimum_138
# min_143 => minimum_142
# min_147 => minimum_146
# min_15 => minimum_14
# min_151 => minimum_150
# min_155 => minimum_154
# min_159 => minimum_158
# min_163 => minimum_162
# min_167 => minimum_166
# min_171 => minimum_170
# min_175 => minimum_174
# min_179 => minimum_178
# min_183 => minimum_182
# min_187 => minimum_186
# min_19 => minimum_18
# min_191 => minimum_190
# min_195 => minimum_194
# min_199 => minimum_198
# min_23 => minimum_22
# min_27 => minimum_26
# min_3 => minimum_2
# min_31 => minimum_30
# min_35 => minimum_34
# min_39 => minimum_38
# min_43 => minimum_42
# min_47 => minimum_46
# min_51 => minimum_50
# min_55 => minimum_54
# min_59 => minimum_58
# min_63 => minimum_62
# min_67 => minimum_66
# min_7 => minimum_6
# min_71 => minimum_70
# min_75 => minimum_74
# min_79 => minimum_78
# min_83 => minimum_82
# min_87 => minimum_86
# min_91 => minimum_90
# min_95 => minimum_94
# min_99 => minimum_98
# mul => mul
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_18 => mul_18
# mul_19 => mul_19
# mul_2 => mul_2
# mul_20 => mul_20
# mul_21 => mul_21
# mul_22 => mul_22
# mul_23 => mul_23
# mul_24 => mul_24
# mul_25 => mul_25
# mul_26 => mul_26
# mul_27 => mul_27
# mul_28 => mul_28
# mul_29 => mul_29
# mul_3 => mul_3
# mul_30 => mul_30
# mul_31 => mul_31
# mul_32 => mul_32
# mul_33 => mul_33
# mul_34 => mul_34
# mul_35 => mul_35
# mul_36 => mul_36
# mul_37 => mul_37
# mul_38 => mul_38
# mul_39 => mul_39
# mul_4 => mul_4
# mul_40 => mul_40
# mul_41 => mul_41
# mul_42 => mul_42
# mul_43 => mul_43
# mul_44 => mul_44
# mul_45 => mul_45
# mul_46 => mul_46
# mul_47 => mul_47
# mul_48 => mul_48
# mul_49 => mul_49
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# p1_1 => neg_7
# p1_11 => neg_67
# p1_13 => neg_79
# p1_15 => neg_91
# p1_17 => neg_103
# p1_19 => neg_115
# p1_21 => neg_127
# p1_23 => neg_139
# p1_25 => neg_151
# p1_27 => neg_163
# p1_29 => neg_175
# p1_3 => neg_19
# p1_31 => neg_187
# p1_33 => neg_199
# p1_35 => neg_211
# p1_37 => neg_223
# p1_39 => neg_235
# p1_41 => neg_247
# p1_43 => neg_259
# p1_45 => neg_271
# p1_47 => neg_283
# p1_49 => neg_295
# p1_5 => neg_31
# p1_51 => neg_307
# p1_53 => neg_319
# p1_55 => neg_331
# p1_57 => neg_343
# p1_59 => neg_355
# p1_61 => neg_367
# p1_63 => neg_379
# p1_65 => neg_391
# p1_67 => neg_403
# p1_69 => neg_415
# p1_7 => neg_43
# p1_71 => neg_427
# p1_73 => neg_439
# p1_75 => neg_451
# p1_77 => neg_463
# p1_79 => neg_475
# p1_81 => neg_487
# p1_83 => neg_499
# p1_85 => neg_511
# p1_87 => neg_523
# p1_89 => neg_535
# p1_9 => neg_55
# p1_91 => neg_547
# p1_93 => neg_559
# p1_95 => neg_571
# p1_97 => neg_583
# p1_99 => neg_595
# p2_1 => neg_9
# p2_11 => neg_69
# p2_13 => neg_81
# p2_15 => neg_93
# p2_17 => neg_105
# p2_19 => neg_117
# p2_21 => neg_129
# p2_23 => neg_141
# p2_25 => neg_153
# p2_27 => neg_165
# p2_29 => neg_177
# p2_3 => neg_21
# p2_31 => neg_189
# p2_33 => neg_201
# p2_35 => neg_213
# p2_37 => neg_225
# p2_39 => neg_237
# p2_41 => neg_249
# p2_43 => neg_261
# p2_45 => neg_273
# p2_47 => neg_285
# p2_49 => neg_297
# p2_5 => neg_33
# p2_51 => neg_309
# p2_53 => neg_321
# p2_55 => neg_333
# p2_57 => neg_345
# p2_59 => neg_357
# p2_61 => neg_369
# p2_63 => neg_381
# p2_65 => neg_393
# p2_67 => neg_405
# p2_69 => neg_417
# p2_7 => neg_45
# p2_71 => neg_429
# p2_73 => neg_441
# p2_75 => neg_453
# p2_77 => neg_465
# p2_79 => neg_477
# p2_81 => neg_489
# p2_83 => neg_501
# p2_85 => neg_513
# p2_87 => neg_525
# p2_89 => neg_537
# p2_9 => neg_57
# p2_91 => neg_549
# p2_93 => neg_561
# p2_95 => neg_573
# p2_97 => neg_585
# p2_99 => neg_597
# p3_1 => neg_11
# p3_11 => neg_71
# p3_13 => neg_83
# p3_15 => neg_95
# p3_17 => neg_107
# p3_19 => neg_119
# p3_21 => neg_131
# p3_23 => neg_143
# p3_25 => neg_155
# p3_27 => neg_167
# p3_29 => neg_179
# p3_3 => neg_23
# p3_31 => neg_191
# p3_33 => neg_203
# p3_35 => neg_215
# p3_37 => neg_227
# p3_39 => neg_239
# p3_41 => neg_251
# p3_43 => neg_263
# p3_45 => neg_275
# p3_47 => neg_287
# p3_49 => neg_299
# p3_5 => neg_35
# p3_51 => neg_311
# p3_53 => neg_323
# p3_55 => neg_335
# p3_57 => neg_347
# p3_59 => neg_359
# p3_61 => neg_371
# p3_63 => neg_383
# p3_65 => neg_395
# p3_67 => neg_407
# p3_69 => neg_419
# p3_7 => neg_47
# p3_71 => neg_431
# p3_73 => neg_443
# p3_75 => neg_455
# p3_77 => neg_467
# p3_79 => neg_479
# p3_81 => neg_491
# p3_83 => neg_503
# p3_85 => neg_515
# p3_87 => neg_527
# p3_89 => neg_539
# p3_9 => neg_59
# p3_91 => neg_551
# p3_93 => neg_563
# p3_95 => neg_575
# p3_97 => neg_587
# p3_99 => neg_599
# relu_10 => relu_10
# relu_100 => relu_100
# relu_12 => relu_12
# relu_14 => relu_14
# relu_16 => relu_16
# relu_18 => relu_18
# relu_2 => relu_2
# relu_20 => relu_20
# relu_22 => relu_22
# relu_24 => relu_24
# relu_26 => relu_26
# relu_28 => relu_28
# relu_30 => relu_30
# relu_32 => relu_32
# relu_34 => relu_34
# relu_36 => relu_36
# relu_38 => relu_38
# relu_4 => relu_4
# relu_40 => relu_40
# relu_42 => relu_42
# relu_44 => relu_44
# relu_46 => relu_46
# relu_48 => relu_48
# relu_50 => relu_50
# relu_52 => relu_52
# relu_54 => relu_54
# relu_56 => relu_56
# relu_58 => relu_58
# relu_6 => relu_6
# relu_60 => relu_60
# relu_62 => relu_62
# relu_64 => relu_64
# relu_66 => relu_66
# relu_68 => relu_68
# relu_70 => relu_70
# relu_72 => relu_72
# relu_74 => relu_74
# relu_76 => relu_76
# relu_78 => relu_78
# relu_8 => relu_8
# relu_80 => relu_80
# relu_82 => relu_82
# relu_84 => relu_84
# relu_86 => relu_86
# relu_88 => relu_88
# relu_90 => relu_90
# relu_92 => relu_92
# relu_94 => relu_94
# relu_96 => relu_96
# relu_98 => relu_98
# skel => relu
# skel_1 => add
# skel_10 => add_9
# skel_11 => add_10
# skel_12 => add_11
# skel_13 => add_12
# skel_14 => add_13
# skel_15 => add_14
# skel_16 => add_15
# skel_17 => add_16
# skel_18 => add_17
# skel_19 => add_18
# skel_2 => add_1
# skel_20 => add_19
# skel_21 => add_20
# skel_22 => add_21
# skel_23 => add_22
# skel_24 => add_23
# skel_25 => add_24
# skel_26 => add_25
# skel_27 => add_26
# skel_28 => add_27
# skel_29 => add_28
# skel_3 => add_2
# skel_30 => add_29
# skel_31 => add_30
# skel_32 => add_31
# skel_33 => add_32
# skel_34 => add_33
# skel_35 => add_34
# skel_36 => add_35
# skel_37 => add_36
# skel_38 => add_37
# skel_39 => add_38
# skel_4 => add_3
# skel_40 => add_39
# skel_41 => add_40
# skel_42 => add_41
# skel_43 => add_42
# skel_44 => add_43
# skel_45 => add_44
# skel_46 => add_45
# skel_47 => add_46
# skel_48 => add_47
# skel_49 => add_48
# skel_5 => add_4
# skel_50 => add_49
# skel_6 => add_5
# skel_7 => add_6
# skel_8 => add_7
# skel_9 => add_8
# sub => sub
# sub_1 => sub_1
# sub_10 => sub_10
# sub_100 => sub_100
# sub_11 => sub_11
# sub_12 => sub_12
# sub_13 => sub_13
# sub_14 => sub_14
# sub_15 => sub_15
# sub_16 => sub_16
# sub_17 => sub_17
# sub_18 => sub_18
# sub_19 => sub_19
# sub_2 => sub_2
# sub_20 => sub_20
# sub_21 => sub_21
# sub_22 => sub_22
# sub_23 => sub_23
# sub_24 => sub_24
# sub_25 => sub_25
# sub_26 => sub_26
# sub_27 => sub_27
# sub_28 => sub_28
# sub_29 => sub_29
# sub_3 => sub_3
# sub_30 => sub_30
# sub_31 => sub_31
# sub_32 => sub_32
# sub_33 => sub_33
# sub_34 => sub_34
# sub_35 => sub_35
# sub_36 => sub_36
# sub_37 => sub_37
# sub_38 => sub_38
# sub_39 => sub_39
# sub_4 => sub_4
# sub_40 => sub_40
# sub_41 => sub_41
# sub_42 => sub_42
# sub_43 => sub_43
# sub_44 => sub_44
# sub_45 => sub_45
# sub_46 => sub_46
# sub_47 => sub_47
# sub_48 => sub_48
# sub_49 => sub_49
# sub_5 => sub_5
# sub_50 => sub_50
# sub_51 => sub_51
# sub_52 => sub_52
# sub_53 => sub_53
# sub_54 => sub_54
# sub_55 => sub_55
# sub_56 => sub_56
# sub_57 => sub_57
# sub_58 => sub_58
# sub_59 => sub_59
# sub_6 => sub_6
# sub_60 => sub_60
# sub_61 => sub_61
# sub_62 => sub_62
# sub_63 => sub_63
# sub_64 => sub_64
# sub_65 => sub_65
# sub_66 => sub_66
# sub_67 => sub_67
# sub_68 => sub_68
# sub_69 => sub_69
# sub_7 => sub_7
# sub_70 => sub_70
# sub_71 => sub_71
# sub_72 => sub_72
# sub_73 => sub_73
# sub_74 => sub_74
# sub_75 => sub_75
# sub_76 => sub_76
# sub_77 => sub_77
# sub_78 => sub_78
# sub_79 => sub_79
# sub_8 => sub_8
# sub_80 => sub_80
# sub_81 => sub_81
# sub_82 => sub_82
# sub_83 => sub_83
# sub_84 => sub_84
# sub_85 => sub_85
# sub_86 => sub_86
# sub_87 => sub_87
# sub_88 => sub_88
# sub_89 => sub_89
# sub_9 => sub_9
# sub_90 => sub_90
# sub_91 => sub_91
# sub_92 => sub_92
# sub_93 => sub_93
# sub_94 => sub_94
# sub_95 => sub_95
# sub_96 => sub_96
# sub_97 => sub_97
# sub_98 => sub_98
# sub_99 => sub_99
# Graph fragment:
# %neg_7 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_8,), kwargs = {})
# %neg_11 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_12,), kwargs = {})
# %minimum_2 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_7, %neg_11), kwargs = {})
# %neg_9 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_10,), kwargs = {})
# %minimum_3 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_2, %neg_9), kwargs = {})
# %neg_19 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_22,), kwargs = {})
# %neg_23 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_26,), kwargs = {})
# %minimum_6 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_19, %neg_23), kwargs = {})
# %neg_21 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_24,), kwargs = {})
# %minimum_7 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_6, %neg_21), kwargs = {})
# %neg_31 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_36,), kwargs = {})
# %neg_35 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_40,), kwargs = {})
# %minimum_10 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_31, %neg_35), kwargs = {})
# %neg_33 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_38,), kwargs = {})
# %minimum_11 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_10, %neg_33), kwargs = {})
# %neg_43 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_50,), kwargs = {})
# %neg_47 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_54,), kwargs = {})
# %minimum_14 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_43, %neg_47), kwargs = {})
# %neg_45 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_52,), kwargs = {})
# %minimum_15 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_14, %neg_45), kwargs = {})
# %neg_55 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_64,), kwargs = {})
# %neg_59 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_68,), kwargs = {})
# %minimum_18 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_55, %neg_59), kwargs = {})
# %neg_57 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_66,), kwargs = {})
# %minimum_19 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_18, %neg_57), kwargs = {})
# %neg_67 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_78,), kwargs = {})
# %neg_71 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_82,), kwargs = {})
# %minimum_22 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_67, %neg_71), kwargs = {})
# %neg_69 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_80,), kwargs = {})
# %minimum_23 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_22, %neg_69), kwargs = {})
# %neg_79 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_92,), kwargs = {})
# %neg_83 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_96,), kwargs = {})
# %minimum_26 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_79, %neg_83), kwargs = {})
# %neg_81 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_94,), kwargs = {})
# %minimum_27 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_26, %neg_81), kwargs = {})
# %neg_91 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_106,), kwargs = {})
# %neg_95 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_110,), kwargs = {})
# %minimum_30 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_91, %neg_95), kwargs = {})
# %neg_93 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_108,), kwargs = {})
# %minimum_31 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_30, %neg_93), kwargs = {})
# %neg_103 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_120,), kwargs = {})
# %neg_107 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_124,), kwargs = {})
# %minimum_34 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_103, %neg_107), kwargs = {})
# %neg_105 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_122,), kwargs = {})
# %minimum_35 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_34, %neg_105), kwargs = {})
# %neg_115 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_134,), kwargs = {})
# %neg_119 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_138,), kwargs = {})
# %minimum_38 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_115, %neg_119), kwargs = {})
# %neg_117 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_136,), kwargs = {})
# %minimum_39 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_38, %neg_117), kwargs = {})
# %neg_127 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_148,), kwargs = {})
# %neg_131 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_152,), kwargs = {})
# %minimum_42 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_127, %neg_131), kwargs = {})
# %neg_129 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_150,), kwargs = {})
# %minimum_43 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_42, %neg_129), kwargs = {})
# %neg_139 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_162,), kwargs = {})
# %neg_143 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_166,), kwargs = {})
# %minimum_46 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_139, %neg_143), kwargs = {})
# %neg_141 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_164,), kwargs = {})
# %minimum_47 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_46, %neg_141), kwargs = {})
# %neg_151 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_176,), kwargs = {})
# %neg_155 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_180,), kwargs = {})
# %minimum_50 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_151, %neg_155), kwargs = {})
# %neg_153 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_178,), kwargs = {})
# %minimum_51 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_50, %neg_153), kwargs = {})
# %neg_163 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_190,), kwargs = {})
# %neg_167 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_194,), kwargs = {})
# %minimum_54 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_163, %neg_167), kwargs = {})
# %neg_165 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_192,), kwargs = {})
# %minimum_55 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_54, %neg_165), kwargs = {})
# %neg_175 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_204,), kwargs = {})
# %neg_179 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_208,), kwargs = {})
# %minimum_58 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_175, %neg_179), kwargs = {})
# %neg_177 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_206,), kwargs = {})
# %minimum_59 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_58, %neg_177), kwargs = {})
# %neg_187 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_218,), kwargs = {})
# %neg_191 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_222,), kwargs = {})
# %minimum_62 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_187, %neg_191), kwargs = {})
# %neg_189 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_220,), kwargs = {})
# %minimum_63 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_62, %neg_189), kwargs = {})
# %neg_199 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_232,), kwargs = {})
# %neg_203 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_236,), kwargs = {})
# %minimum_66 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_199, %neg_203), kwargs = {})
# %neg_201 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_234,), kwargs = {})
# %minimum_67 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_66, %neg_201), kwargs = {})
# %neg_211 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_246,), kwargs = {})
# %neg_215 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_250,), kwargs = {})
# %minimum_70 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_211, %neg_215), kwargs = {})
# %neg_213 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_248,), kwargs = {})
# %minimum_71 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_70, %neg_213), kwargs = {})
# %neg_223 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_260,), kwargs = {})
# %neg_227 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_264,), kwargs = {})
# %minimum_74 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_223, %neg_227), kwargs = {})
# %neg_225 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_262,), kwargs = {})
# %minimum_75 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_74, %neg_225), kwargs = {})
# %neg_235 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_274,), kwargs = {})
# %neg_239 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_278,), kwargs = {})
# %minimum_78 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_235, %neg_239), kwargs = {})
# %neg_237 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_276,), kwargs = {})
# %minimum_79 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_78, %neg_237), kwargs = {})
# %neg_247 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_288,), kwargs = {})
# %neg_251 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_292,), kwargs = {})
# %minimum_82 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_247, %neg_251), kwargs = {})
# %neg_249 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_290,), kwargs = {})
# %minimum_83 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_82, %neg_249), kwargs = {})
# %neg_259 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_302,), kwargs = {})
# %neg_263 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_306,), kwargs = {})
# %minimum_86 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_259, %neg_263), kwargs = {})
# %neg_261 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_304,), kwargs = {})
# %minimum_87 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_86, %neg_261), kwargs = {})
# %neg_271 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_316,), kwargs = {})
# %neg_275 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_320,), kwargs = {})
# %minimum_90 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_271, %neg_275), kwargs = {})
# %neg_273 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_318,), kwargs = {})
# %minimum_91 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_90, %neg_273), kwargs = {})
# %neg_283 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_330,), kwargs = {})
# %neg_287 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_334,), kwargs = {})
# %minimum_94 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_283, %neg_287), kwargs = {})
# %neg_285 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_332,), kwargs = {})
# %minimum_95 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_94, %neg_285), kwargs = {})
# %neg_295 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_344,), kwargs = {})
# %neg_299 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_348,), kwargs = {})
# %minimum_98 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_295, %neg_299), kwargs = {})
# %neg_297 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_346,), kwargs = {})
# %minimum_99 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_98, %neg_297), kwargs = {})
# %neg_307 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_358,), kwargs = {})
# %neg_311 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_362,), kwargs = {})
# %minimum_102 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_307, %neg_311), kwargs = {})
# %neg_309 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_360,), kwargs = {})
# %minimum_103 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_102, %neg_309), kwargs = {})
# %neg_319 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_372,), kwargs = {})
# %neg_323 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_376,), kwargs = {})
# %minimum_106 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_319, %neg_323), kwargs = {})
# %neg_321 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_374,), kwargs = {})
# %minimum_107 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_106, %neg_321), kwargs = {})
# %neg_331 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_386,), kwargs = {})
# %neg_335 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_390,), kwargs = {})
# %minimum_110 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_331, %neg_335), kwargs = {})
# %neg_333 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_388,), kwargs = {})
# %minimum_111 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_110, %neg_333), kwargs = {})
# %neg_343 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_400,), kwargs = {})
# %neg_347 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_404,), kwargs = {})
# %minimum_114 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_343, %neg_347), kwargs = {})
# %neg_345 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_402,), kwargs = {})
# %minimum_115 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_114, %neg_345), kwargs = {})
# %neg_355 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_414,), kwargs = {})
# %neg_359 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_418,), kwargs = {})
# %minimum_118 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_355, %neg_359), kwargs = {})
# %neg_357 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_416,), kwargs = {})
# %minimum_119 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_118, %neg_357), kwargs = {})
# %neg_367 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_428,), kwargs = {})
# %neg_371 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_432,), kwargs = {})
# %minimum_122 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_367, %neg_371), kwargs = {})
# %neg_369 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_430,), kwargs = {})
# %minimum_123 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_122, %neg_369), kwargs = {})
# %neg_379 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_442,), kwargs = {})
# %neg_383 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_446,), kwargs = {})
# %minimum_126 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_379, %neg_383), kwargs = {})
# %neg_381 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_444,), kwargs = {})
# %minimum_127 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_126, %neg_381), kwargs = {})
# %neg_391 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_456,), kwargs = {})
# %neg_395 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_460,), kwargs = {})
# %minimum_130 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_391, %neg_395), kwargs = {})
# %neg_393 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_458,), kwargs = {})
# %minimum_131 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_130, %neg_393), kwargs = {})
# %neg_403 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_470,), kwargs = {})
# %neg_407 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_474,), kwargs = {})
# %minimum_134 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_403, %neg_407), kwargs = {})
# %neg_405 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_472,), kwargs = {})
# %minimum_135 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_134, %neg_405), kwargs = {})
# %neg_415 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_484,), kwargs = {})
# %neg_419 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_488,), kwargs = {})
# %minimum_138 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_415, %neg_419), kwargs = {})
# %neg_417 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_486,), kwargs = {})
# %minimum_139 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_138, %neg_417), kwargs = {})
# %neg_427 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_498,), kwargs = {})
# %neg_431 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_502,), kwargs = {})
# %minimum_142 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_427, %neg_431), kwargs = {})
# %neg_429 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_500,), kwargs = {})
# %minimum_143 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_142, %neg_429), kwargs = {})
# %neg_439 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_512,), kwargs = {})
# %neg_443 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_516,), kwargs = {})
# %minimum_146 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_439, %neg_443), kwargs = {})
# %neg_441 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_514,), kwargs = {})
# %minimum_147 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_146, %neg_441), kwargs = {})
# %neg_451 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_526,), kwargs = {})
# %neg_455 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_530,), kwargs = {})
# %minimum_150 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_451, %neg_455), kwargs = {})
# %neg_453 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_528,), kwargs = {})
# %minimum_151 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_150, %neg_453), kwargs = {})
# %neg_463 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_540,), kwargs = {})
# %neg_467 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_544,), kwargs = {})
# %minimum_154 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_463, %neg_467), kwargs = {})
# %neg_465 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_542,), kwargs = {})
# %minimum_155 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_154, %neg_465), kwargs = {})
# %neg_475 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_554,), kwargs = {})
# %neg_479 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_558,), kwargs = {})
# %minimum_158 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_475, %neg_479), kwargs = {})
# %neg_477 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_556,), kwargs = {})
# %minimum_159 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_158, %neg_477), kwargs = {})
# %neg_487 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_568,), kwargs = {})
# %neg_491 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_572,), kwargs = {})
# %minimum_162 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_487, %neg_491), kwargs = {})
# %neg_489 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_570,), kwargs = {})
# %minimum_163 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_162, %neg_489), kwargs = {})
# %neg_499 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_582,), kwargs = {})
# %neg_503 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_586,), kwargs = {})
# %minimum_166 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_499, %neg_503), kwargs = {})
# %neg_501 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_584,), kwargs = {})
# %minimum_167 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_166, %neg_501), kwargs = {})
# %neg_511 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_596,), kwargs = {})
# %neg_515 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_600,), kwargs = {})
# %minimum_170 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_511, %neg_515), kwargs = {})
# %neg_513 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_598,), kwargs = {})
# %minimum_171 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_170, %neg_513), kwargs = {})
# %neg_523 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_610,), kwargs = {})
# %neg_527 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_614,), kwargs = {})
# %minimum_174 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_523, %neg_527), kwargs = {})
# %neg_525 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_612,), kwargs = {})
# %minimum_175 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_174, %neg_525), kwargs = {})
# %neg_535 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_624,), kwargs = {})
# %neg_539 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_628,), kwargs = {})
# %minimum_178 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_535, %neg_539), kwargs = {})
# %neg_537 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_626,), kwargs = {})
# %minimum_179 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_178, %neg_537), kwargs = {})
# %neg_547 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_638,), kwargs = {})
# %neg_551 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_642,), kwargs = {})
# %minimum_182 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_547, %neg_551), kwargs = {})
# %neg_549 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_640,), kwargs = {})
# %minimum_183 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_182, %neg_549), kwargs = {})
# %neg_559 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_652,), kwargs = {})
# %neg_563 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_656,), kwargs = {})
# %minimum_186 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_559, %neg_563), kwargs = {})
# %neg_561 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_654,), kwargs = {})
# %minimum_187 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_186, %neg_561), kwargs = {})
# %neg_571 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_666,), kwargs = {})
# %neg_575 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_670,), kwargs = {})
# %minimum_190 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_571, %neg_575), kwargs = {})
# %neg_573 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_668,), kwargs = {})
# %minimum_191 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_190, %neg_573), kwargs = {})
# %neg_583 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_680,), kwargs = {})
# %neg_587 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_684,), kwargs = {})
# %minimum_194 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_583, %neg_587), kwargs = {})
# %neg_585 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_682,), kwargs = {})
# %minimum_195 : [num_users=7] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_194, %neg_585), kwargs = {})
# %neg_595 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_694,), kwargs = {})
# %neg_599 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_698,), kwargs = {})
# %minimum_198 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%neg_595, %neg_599), kwargs = {})
# %neg_597 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_696,), kwargs = {})
# %minimum_199 : [num_users=4] = call_function[target=torch.ops.aten.minimum.default](args = (%minimum_198, %neg_597), kwargs = {})
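# (The %neg_*/%minimum_* fragments end here; the fragments below fuse the
# running sub/relu/mul/add accumulation that consumes each %minimum_* result.)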
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem_6), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_3, %getitem_20), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %relu_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_1, %mul), kwargs = {})
# %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_7, %getitem_34), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %relu_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_3, %mul_1), kwargs = {})
# %relu_4 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_4,), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_11, %getitem_48), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_5,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %relu_5), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_5, %mul_2), kwargs = {})
# %relu_6 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_6,), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %relu_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_15, %getitem_62), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_7,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %relu_7), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_7, %mul_3), kwargs = {})
# %relu_8 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_8,), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %relu_8), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_19, %getitem_76), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_9,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %relu_9), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_9, %mul_4), kwargs = {})
# %relu_10 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_10,), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %relu_10), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_23, %getitem_90), kwargs = {})
# %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_11,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, %relu_11), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_11, %mul_5), kwargs = {})
# %relu_12 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_12,), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %relu_12), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_27, %getitem_104), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_13,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %relu_13), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_13, %mul_6), kwargs = {})
# %relu_14 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_14,), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %relu_14), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_31, %getitem_118), kwargs = {})
# %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_15,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, %relu_15), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_15, %mul_7), kwargs = {})
# %relu_16 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_16,), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %relu_16), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_35, %getitem_132), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_17,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %relu_17), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_17, %mul_8), kwargs = {})
# %relu_18 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_18,), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %relu_18), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_39, %getitem_146), kwargs = {})
# %relu_19 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_19,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, %relu_19), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_19, %mul_9), kwargs = {})
# %relu_20 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_20,), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %relu_20), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_43, %getitem_160), kwargs = {})
# %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_21,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, %relu_21), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_21, %mul_10), kwargs = {})
# %relu_22 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_22,), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %relu_22), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_47, %getitem_174), kwargs = {})
# %relu_23 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_23,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, %relu_23), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_23, %mul_11), kwargs = {})
# %relu_24 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_24,), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %relu_24), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_51, %getitem_188), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_25,), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, %relu_25), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_25, %mul_12), kwargs = {})
# %relu_26 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_26,), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %relu_26), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_55, %getitem_202), kwargs = {})
# %relu_27 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_27,), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, %relu_27), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_27, %mul_13), kwargs = {})
# %relu_28 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_28,), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %relu_28), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_59, %getitem_216), kwargs = {})
# %relu_29 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_29,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, %relu_29), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_29, %mul_14), kwargs = {})
# %relu_30 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_30,), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %relu_30), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_63, %getitem_230), kwargs = {})
# %relu_31 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_31,), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_14, %relu_31), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_31, %mul_15), kwargs = {})
# %relu_32 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_32,), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_14, %relu_32), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_67, %getitem_244), kwargs = {})
# %relu_33 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_33,), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, %relu_33), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_33, %mul_16), kwargs = {})
# %relu_34 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_34,), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_15, %relu_34), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_71, %getitem_258), kwargs = {})
# %relu_35 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_35,), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_16, %relu_35), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_35, %mul_17), kwargs = {})
# %relu_36 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_36,), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %relu_36), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_75, %getitem_272), kwargs = {})
# %relu_37 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_37,), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_17, %relu_37), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_37, %mul_18), kwargs = {})
# %relu_38 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_38,), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %relu_38), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_79, %getitem_286), kwargs = {})
# %relu_39 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_39,), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, %relu_39), kwargs = {})
# %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_39, %mul_19), kwargs = {})
# %relu_40 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_40,), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %relu_40), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_83, %getitem_300), kwargs = {})
# %relu_41 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_41,), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_19, %relu_41), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_41, %mul_20), kwargs = {})
# %relu_42 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_42,), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %relu_42), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_87, %getitem_314), kwargs = {})
# %relu_43 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_43,), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_20, %relu_43), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_43, %mul_21), kwargs = {})
# %relu_44 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_44,), kwargs = {})
# %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %relu_44), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_91, %getitem_328), kwargs = {})
# %relu_45 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_45,), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, %relu_45), kwargs = {})
# %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_45, %mul_22), kwargs = {})
# %relu_46 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_46,), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_21, %relu_46), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_95, %getitem_342), kwargs = {})
# %relu_47 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_47,), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_22, %relu_47), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_47, %mul_23), kwargs = {})
# %relu_48 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_48,), kwargs = {})
# %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %relu_48), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_99, %getitem_356), kwargs = {})
# %relu_49 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_49,), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_23, %relu_49), kwargs = {})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_49, %mul_24), kwargs = {})
# %relu_50 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_50,), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %relu_50), kwargs = {})
# %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_103, %getitem_370), kwargs = {})
# %relu_51 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_51,), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_24, %relu_51), kwargs = {})
# %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_51, %mul_25), kwargs = {})
# %relu_52 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_52,), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_24, %relu_52), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_107, %getitem_384), kwargs = {})
# %relu_53 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_53,), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_25, %relu_53), kwargs = {})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_53, %mul_26), kwargs = {})
# %relu_54 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_54,), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %relu_54), kwargs = {})
# %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_111, %getitem_398), kwargs = {})
# %relu_55 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_55,), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_26, %relu_55), kwargs = {})
# %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_55, %mul_27), kwargs = {})
# %relu_56 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_56,), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %relu_56), kwargs = {})
# %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_115, %getitem_412), kwargs = {})
# %relu_57 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_57,), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_27, %relu_57), kwargs = {})
# %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_57, %mul_28), kwargs = {})
# %relu_58 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_58,), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_27, %relu_58), kwargs = {})
# %sub_59 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_119, %getitem_426), kwargs = {})
# %relu_59 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_59,), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_28, %relu_59), kwargs = {})
# %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_59, %mul_29), kwargs = {})
# %relu_60 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_60,), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %relu_60), kwargs = {})
# %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_123, %getitem_440), kwargs = {})
# %relu_61 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_61,), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_29, %relu_61), kwargs = {})
# %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_61, %mul_30), kwargs = {})
# %relu_62 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_62,), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %relu_62), kwargs = {})
# %sub_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_127, %getitem_454), kwargs = {})
# %relu_63 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_63,), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_30, %relu_63), kwargs = {})
# %sub_64 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_63, %mul_31), kwargs = {})
# %relu_64 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_64,), kwargs = {})
# %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_30, %relu_64), kwargs = {})
# %sub_65 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_131, %getitem_468), kwargs = {})
# %relu_65 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_65,), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_31, %relu_65), kwargs = {})
# %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_65, %mul_32), kwargs = {})
# %relu_66 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_66,), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_31, %relu_66), kwargs = {})
# %sub_67 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_135, %getitem_482), kwargs = {})
# %relu_67 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_67,), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_32, %relu_67), kwargs = {})
# %sub_68 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_67, %mul_33), kwargs = {})
# %relu_68 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_68,), kwargs = {})
# %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_32, %relu_68), kwargs = {})
# %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_139, %getitem_496), kwargs = {})
# %relu_69 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_69,), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_33, %relu_69), kwargs = {})
# %sub_70 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_69, %mul_34), kwargs = {})
# %relu_70 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_70,), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_33, %relu_70), kwargs = {})
# %sub_71 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_143, %getitem_510), kwargs = {})
# %relu_71 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_71,), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_34, %relu_71), kwargs = {})
# %sub_72 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_71, %mul_35), kwargs = {})
# %relu_72 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_72,), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_34, %relu_72), kwargs = {})
# %sub_73 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_147, %getitem_524), kwargs = {})
# %relu_73 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_73,), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_35, %relu_73), kwargs = {})
# %sub_74 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_73, %mul_36), kwargs = {})
# %relu_74 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_74,), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_35, %relu_74), kwargs = {})
# %sub_75 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_151, %getitem_538), kwargs = {})
# %relu_75 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_75,), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_36, %relu_75), kwargs = {})
# %sub_76 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_75, %mul_37), kwargs = {})
# %relu_76 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_76,), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_36, %relu_76), kwargs = {})
# %sub_77 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_155, %getitem_552), kwargs = {})
# %relu_77 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_77,), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_37, %relu_77), kwargs = {})
# %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_77, %mul_38), kwargs = {})
# %relu_78 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_78,), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_37, %relu_78), kwargs = {})
# %sub_79 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_159, %getitem_566), kwargs = {})
# %relu_79 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_79,), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_38, %relu_79), kwargs = {})
# %sub_80 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_79, %mul_39), kwargs = {})
# %relu_80 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_80,), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_38, %relu_80), kwargs = {})
# %sub_81 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_163, %getitem_580), kwargs = {})
# %relu_81 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_81,), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_39, %relu_81), kwargs = {})
# %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_81, %mul_40), kwargs = {})
# %relu_82 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_82,), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_39, %relu_82), kwargs = {})
# %sub_83 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_167, %getitem_594), kwargs = {})
# %relu_83 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_83,), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_40, %relu_83), kwargs = {})
# %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_83, %mul_41), kwargs = {})
# %relu_84 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_84,), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_40, %relu_84), kwargs = {})
# %sub_85 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_171, %getitem_608), kwargs = {})
# %relu_85 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_85,), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_41, %relu_85), kwargs = {})
# %sub_86 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_85, %mul_42), kwargs = {})
# %relu_86 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_86,), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_41, %relu_86), kwargs = {})
# %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_175, %getitem_622), kwargs = {})
# %relu_87 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_87,), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_42, %relu_87), kwargs = {})
# %sub_88 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_87, %mul_43), kwargs = {})
# %relu_88 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_88,), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_42, %relu_88), kwargs = {})
# %sub_89 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_179, %getitem_636), kwargs = {})
# %relu_89 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_89,), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_43, %relu_89), kwargs = {})
# %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_89, %mul_44), kwargs = {})
# %relu_90 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_90,), kwargs = {})
# %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_43, %relu_90), kwargs = {})
# %sub_91 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_183, %getitem_650), kwargs = {})
# %relu_91 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_91,), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_44, %relu_91), kwargs = {})
# %sub_92 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_91, %mul_45), kwargs = {})
# %relu_92 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_92,), kwargs = {})
# %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_44, %relu_92), kwargs = {})
# %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_187, %getitem_664), kwargs = {})
# %relu_93 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_93,), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_45, %relu_93), kwargs = {})
# %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_93, %mul_46), kwargs = {})
# %relu_94 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_94,), kwargs = {})
# %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_45, %relu_94), kwargs = {})
# %sub_95 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_191, %getitem_678), kwargs = {})
# %relu_95 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_95,), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_46, %relu_95), kwargs = {})
# %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_95, %mul_47), kwargs = {})
# %relu_96 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_96,), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_46, %relu_96), kwargs = {})
# %sub_97 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_195, %getitem_692), kwargs = {})
# %relu_97 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_97,), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_47, %relu_97), kwargs = {})
# %sub_98 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_97, %mul_48), kwargs = {})
# %relu_98 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_98,), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_47, %relu_98), kwargs = {})
# %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_199, %getitem_706), kwargs = {})
# %relu_99 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_99,), kwargs = {})
# %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_48, %relu_99), kwargs = {})
# %sub_100 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_99, %mul_49), kwargs = {})
# %relu_100 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_100,), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_48, %relu_100), kwargs = {})
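# In eager PyTorch terms, each fused step above corresponds roughly to:
#     m   = torch.minimum(torch.minimum(-a, -b), -c)  # minimum over negated pooled values
#     r   = torch.relu(m - p)                         # %sub_k -> %relu_k
#     acc = acc + torch.relu(r - acc * r)             # %mul_* -> %sub_* -> %relu_* -> %add_*
# iterated from %add through %add_49. This is an illustrative reading of the
# graph fragments; a, b, c, p and acc are placeholder names, not from the source.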
triton_poi_fused_add_minimum_mul_neg_relu_sub_6 = async_compile.triton('triton_poi_fused_add_minimum_mul_neg_relu_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: '*fp32', 68: '*fp32', 69: '*fp32', 70: '*fp32', 71: '*fp32', 72: '*fp32', 73: '*fp32', 74: '*fp32', 75: '*fp32', 76: '*fp32', 77: '*fp32', 78: '*fp32', 79: '*fp32', 80: '*fp32', 81: '*fp32', 82: '*fp32', 83: '*fp32', 84: '*fp32', 85: '*fp32', 86: '*fp32', 87: '*fp32', 88: '*fp32', 89: '*fp32', 90: '*fp32', 91: '*fp32', 92: '*fp32', 93: '*fp32', 94: '*fp32', 95: '*fp32', 96: '*fp32', 97: '*fp32', 98: '*fp32', 99: '*fp32', 100: '*fp32', 101: '*fp32', 102: '*fp32', 103: '*fp32', 104: '*fp32', 105: '*fp32', 106: '*fp32', 107: '*fp32', 108: '*fp32', 109: '*fp32', 110: '*fp32', 111: '*fp32', 112: '*fp32', 113: '*fp32', 114: '*fp32', 115: '*fp32', 116: '*fp32', 117: '*fp32', 118: '*fp32', 119: '*fp32', 120: '*fp32', 121: '*fp32', 122: '*fp32', 123: '*fp32', 124: '*fp32', 125: '*fp32', 126: '*fp32', 127: '*fp32', 128: '*fp32', 129: '*fp32', 130: '*fp32', 131: '*fp32', 132: '*fp32', 133: '*fp32', 134: '*fp32', 135: '*fp32', 136: '*fp32', 137: '*fp32', 138: '*fp32', 139: '*fp32', 140: '*fp32', 141: '*fp32', 142: '*fp32', 143: '*fp32', 144: '*fp32', 145: '*fp32', 146: '*fp32', 147: '*fp32', 148: '*fp32', 149: '*fp32', 150: '*fp32', 151: '*fp32', 152: '*fp32', 153: '*fp32', 154: '*fp32', 155: '*fp32', 156: '*fp32', 157: '*fp32', 158: '*fp32', 159: '*fp32', 160: '*fp32', 161: '*fp32', 162: '*fp32', 163: '*fp32', 164: '*fp32', 165: '*fp32', 166: '*fp32', 167: '*fp32', 168: '*fp32', 169: '*fp32', 170: '*fp32', 171: '*fp32', 172: '*fp32', 173: '*fp32', 174: '*fp32', 175: '*fp32', 176: '*fp32', 177: '*fp32', 178: '*fp32', 179: '*fp32', 180: '*fp32', 181: '*fp32', 182: '*fp32', 183: '*fp32', 184: '*fp32', 185: '*fp32', 186: '*fp32', 187: '*fp32', 188: '*fp32', 189: '*fp32', 190: '*fp32', 191: '*fp32', 192: '*fp32', 193: '*fp32', 194: '*fp32', 195: '*fp32', 196: '*fp32', 197: '*fp32', 198: '*fp32', 199: '*fp32', 200: '*fp32', 201: '*fp32', 202: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_minimum_mul_neg_relu_sub_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr10', 'in_out_ptr11', 'in_out_ptr12', 'in_out_ptr13', 'in_out_ptr14', 'in_out_ptr15', 'in_out_ptr16', 'in_out_ptr17', 'in_out_ptr18', 'in_out_ptr19', 'in_out_ptr2', 'in_out_ptr20', 'in_out_ptr21', 'in_out_ptr22', 'in_out_ptr3', 'in_out_ptr4', 'in_out_ptr5', 'in_out_ptr6', 'in_out_ptr7', 'in_out_ptr8', 'in_out_ptr9'], 'no_x_dim': False, 'num_load': 202, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
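# NOTE: this fused kernel takes 202 fp32 pointer arguments (23 accumulators
# passed as mutable in_out_ptr*, listed in mutated_arg_names above, plus 179
# read-only in_ptr* inputs); the pointwise heuristic autotunes block sizes
# for the 256-element problem declared below.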
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_6(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5, in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10, in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15, in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20, in_out_ptr21, in_out_ptr22, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, in_ptr64, in_ptr65, in_ptr66, in_ptr67, in_ptr68, in_ptr69, in_ptr70, in_ptr71, in_ptr72, in_ptr73, in_ptr74, in_ptr75, in_ptr76, in_ptr77, in_ptr78, in_ptr79, in_ptr80, in_ptr81, in_ptr82, in_ptr83, in_ptr84, in_ptr85, in_ptr86, in_ptr87, in_ptr88, in_ptr89, in_ptr90, in_ptr91, in_ptr92, in_ptr93, in_ptr94, in_ptr95, in_ptr96, in_ptr97, in_ptr98, in_ptr99, in_ptr100, in_ptr101, in_ptr102, in_ptr103, in_ptr104, in_ptr105, in_ptr106, in_ptr107, in_ptr108, in_ptr109, in_ptr110, in_ptr111, in_ptr112, in_ptr113, in_ptr114, in_ptr115, in_ptr116, in_ptr117, in_ptr118, in_ptr119, in_ptr120, in_ptr121, in_ptr122, in_ptr123, in_ptr124, in_ptr125, in_ptr126, in_ptr127, in_ptr128, in_ptr129, in_ptr130, in_ptr131, in_ptr132, in_ptr133, in_ptr134, in_ptr135, in_ptr136, in_ptr137, in_ptr138, in_ptr139, in_ptr140, in_ptr141, in_ptr142, in_ptr143, in_ptr144, in_ptr145, in_ptr146, in_ptr147, in_ptr148, in_ptr149, in_ptr150, in_ptr151, in_ptr152, in_ptr153, in_ptr154, in_ptr155, in_ptr156, in_ptr157, in_ptr158, in_ptr159, in_ptr160, in_ptr161, in_ptr162, in_ptr163, in_ptr164, in_ptr165, in_ptr166, in_ptr167, in_ptr168, in_ptr169, in_ptr170, in_ptr171, in_ptr172, in_ptr173, in_ptr174, in_ptr175, in_ptr176, in_ptr177, in_ptr178, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
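    # Standard Inductor pointwise prologue: each program instance handles XBLOCK
    # contiguous elements of the flattened 256-element problem; xmask guards the
    # tail when xnumel is not a multiple of XBLOCK.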
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
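    # Loads from in_out_ptr* target buffers this kernel rewrites in place
    # (see mutated_arg_names above), most likely the running %add_k accumulators;
    # the in_ptr* loads below are consumed read-only.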
tmp5 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp10 = tl.load(in_ptr3 + (x0), xmask)
tmp13 = tl.load(in_ptr4 + (x0), xmask)
tmp20 = tl.load(in_ptr5 + (x0), xmask)
tmp22 = tl.load(in_ptr6 + (x0), xmask)
tmp25 = tl.load(in_ptr7 + (x0), xmask)
tmp28 = tl.load(in_ptr8 + (x0), xmask)
tmp35 = tl.load(in_ptr9 + (x0), xmask)
tmp37 = tl.load(in_ptr10 + (x0), xmask)
tmp40 = tl.load(in_ptr11 + (x0), xmask)
tmp43 = tl.load(in_ptr12 + (x0), xmask)
tmp50 = tl.load(in_out_ptr1 + (x0), xmask)
tmp52 = tl.load(in_ptr13 + (x0), xmask)
tmp55 = tl.load(in_ptr14 + (x0), xmask)
tmp58 = tl.load(in_ptr15 + (x0), xmask)
tmp65 = tl.load(in_out_ptr2 + (x0), xmask)
tmp67 = tl.load(in_ptr16 + (x0), xmask)
tmp70 = tl.load(in_ptr17 + (x0), xmask)
tmp73 = tl.load(in_ptr18 + (x0), xmask)
tmp80 = tl.load(in_out_ptr3 + (x0), xmask)
tmp82 = tl.load(in_ptr19 + (x0), xmask)
tmp85 = tl.load(in_ptr20 + (x0), xmask)
tmp88 = tl.load(in_ptr21 + (x0), xmask)
tmp95 = tl.load(in_out_ptr4 + (x0), xmask)
tmp97 = tl.load(in_ptr22 + (x0), xmask)
tmp100 = tl.load(in_ptr23 + (x0), xmask)
tmp103 = tl.load(in_ptr24 + (x0), xmask)
tmp110 = tl.load(in_out_ptr5 + (x0), xmask)
tmp112 = tl.load(in_ptr25 + (x0), xmask)
tmp115 = tl.load(in_ptr26 + (x0), xmask)
tmp118 = tl.load(in_ptr27 + (x0), xmask)
tmp125 = tl.load(in_out_ptr6 + (x0), xmask)
tmp127 = tl.load(in_ptr28 + (x0), xmask)
tmp130 = tl.load(in_ptr29 + (x0), xmask)
tmp133 = tl.load(in_ptr30 + (x0), xmask)
tmp140 = tl.load(in_out_ptr7 + (x0), xmask)
tmp142 = tl.load(in_ptr31 + (x0), xmask)
tmp145 = tl.load(in_ptr32 + (x0), xmask)
tmp148 = tl.load(in_ptr33 + (x0), xmask)
tmp155 = tl.load(in_ptr34 + (x0), xmask)
tmp157 = tl.load(in_ptr35 + (x0), xmask)
tmp160 = tl.load(in_ptr36 + (x0), xmask)
tmp163 = tl.load(in_ptr37 + (x0), xmask)
tmp170 = tl.load(in_ptr38 + (x0), xmask)
tmp172 = tl.load(in_ptr39 + (x0), xmask)
tmp175 = tl.load(in_ptr40 + (x0), xmask)
tmp178 = tl.load(in_ptr41 + (x0), xmask)
tmp185 = tl.load(in_ptr42 + (x0), xmask)
tmp187 = tl.load(in_ptr43 + (x0), xmask)
tmp190 = tl.load(in_ptr44 + (x0), xmask)
tmp193 = tl.load(in_ptr45 + (x0), xmask)
tmp200 = tl.load(in_ptr46 + (x0), xmask)
tmp202 = tl.load(in_ptr47 + (x0), xmask)
tmp205 = tl.load(in_ptr48 + (x0), xmask)
tmp208 = tl.load(in_ptr49 + (x0), xmask)
tmp215 = tl.load(in_ptr50 + (x0), xmask)
tmp217 = tl.load(in_ptr51 + (x0), xmask)
tmp220 = tl.load(in_ptr52 + (x0), xmask)
tmp223 = tl.load(in_ptr53 + (x0), xmask)
tmp230 = tl.load(in_ptr54 + (x0), xmask)
tmp232 = tl.load(in_ptr55 + (x0), xmask)
tmp235 = tl.load(in_ptr56 + (x0), xmask)
tmp238 = tl.load(in_ptr57 + (x0), xmask)
tmp245 = tl.load(in_ptr58 + (x0), xmask)
tmp247 = tl.load(in_ptr59 + (x0), xmask)
tmp250 = tl.load(in_ptr60 + (x0), xmask)
tmp253 = tl.load(in_ptr61 + (x0), xmask)
tmp260 = tl.load(in_ptr62 + (x0), xmask)
tmp262 = tl.load(in_ptr63 + (x0), xmask)
tmp265 = tl.load(in_ptr64 + (x0), xmask)
tmp268 = tl.load(in_ptr65 + (x0), xmask)
tmp275 = tl.load(in_ptr66 + (x0), xmask)
tmp277 = tl.load(in_ptr67 + (x0), xmask)
tmp280 = tl.load(in_ptr68 + (x0), xmask)
tmp283 = tl.load(in_ptr69 + (x0), xmask)
tmp290 = tl.load(in_ptr70 + (x0), xmask)
tmp292 = tl.load(in_ptr71 + (x0), xmask)
tmp295 = tl.load(in_ptr72 + (x0), xmask)
tmp298 = tl.load(in_ptr73 + (x0), xmask)
tmp305 = tl.load(in_ptr74 + (x0), xmask)
tmp307 = tl.load(in_ptr75 + (x0), xmask)
tmp310 = tl.load(in_ptr76 + (x0), xmask)
tmp313 = tl.load(in_ptr77 + (x0), xmask)
tmp320 = tl.load(in_ptr78 + (x0), xmask)
tmp322 = tl.load(in_ptr79 + (x0), xmask)
tmp325 = tl.load(in_ptr80 + (x0), xmask)
tmp328 = tl.load(in_ptr81 + (x0), xmask)
tmp335 = tl.load(in_ptr82 + (x0), xmask)
tmp337 = tl.load(in_ptr83 + (x0), xmask)
tmp340 = tl.load(in_ptr84 + (x0), xmask)
tmp343 = tl.load(in_ptr85 + (x0), xmask)
tmp350 = tl.load(in_ptr86 + (x0), xmask)
tmp352 = tl.load(in_ptr87 + (x0), xmask)
tmp355 = tl.load(in_ptr88 + (x0), xmask)
tmp358 = tl.load(in_ptr89 + (x0), xmask)
tmp365 = tl.load(in_ptr90 + (x0), xmask)
tmp367 = tl.load(in_ptr91 + (x0), xmask)
tmp370 = tl.load(in_ptr92 + (x0), xmask)
tmp373 = tl.load(in_ptr93 + (x0), xmask)
tmp380 = tl.load(in_ptr94 + (x0), xmask)
tmp382 = tl.load(in_ptr95 + (x0), xmask)
tmp385 = tl.load(in_ptr96 + (x0), xmask)
tmp388 = tl.load(in_ptr97 + (x0), xmask)
tmp395 = tl.load(in_ptr98 + (x0), xmask)
tmp397 = tl.load(in_ptr99 + (x0), xmask)
tmp400 = tl.load(in_ptr100 + (x0), xmask)
tmp403 = tl.load(in_ptr101 + (x0), xmask)
tmp410 = tl.load(in_ptr102 + (x0), xmask)
tmp412 = tl.load(in_ptr103 + (x0), xmask)
tmp415 = tl.load(in_ptr104 + (x0), xmask)
tmp418 = tl.load(in_ptr105 + (x0), xmask)
tmp425 = tl.load(in_ptr106 + (x0), xmask)
tmp427 = tl.load(in_ptr107 + (x0), xmask)
tmp430 = tl.load(in_ptr108 + (x0), xmask)
tmp433 = tl.load(in_ptr109 + (x0), xmask)
tmp440 = tl.load(in_ptr110 + (x0), xmask)
tmp442 = tl.load(in_ptr111 + (x0), xmask)
tmp445 = tl.load(in_ptr112 + (x0), xmask)
tmp448 = tl.load(in_ptr113 + (x0), xmask)
tmp455 = tl.load(in_ptr114 + (x0), xmask)
tmp457 = tl.load(in_ptr115 + (x0), xmask)
tmp460 = tl.load(in_ptr116 + (x0), xmask)
tmp463 = tl.load(in_ptr117 + (x0), xmask)
tmp470 = tl.load(in_ptr118 + (x0), xmask)
tmp472 = tl.load(in_ptr119 + (x0), xmask)
tmp475 = tl.load(in_ptr120 + (x0), xmask)
tmp478 = tl.load(in_ptr121 + (x0), xmask)
tmp485 = tl.load(in_ptr122 + (x0), xmask)
tmp487 = tl.load(in_ptr123 + (x0), xmask)
tmp490 = tl.load(in_ptr124 + (x0), xmask)
tmp493 = tl.load(in_ptr125 + (x0), xmask)
tmp500 = tl.load(in_ptr126 + (x0), xmask)
tmp502 = tl.load(in_ptr127 + (x0), xmask)
tmp505 = tl.load(in_ptr128 + (x0), xmask)
tmp508 = tl.load(in_ptr129 + (x0), xmask)
tmp515 = tl.load(in_ptr130 + (x0), xmask)
tmp517 = tl.load(in_ptr131 + (x0), xmask)
tmp520 = tl.load(in_ptr132 + (x0), xmask)
tmp523 = tl.load(in_ptr133 + (x0), xmask)
tmp530 = tl.load(in_ptr134 + (x0), xmask)
tmp532 = tl.load(in_ptr135 + (x0), xmask)
tmp535 = tl.load(in_out_ptr8 + (x0), xmask)
tmp538 = tl.load(in_ptr136 + (x0), xmask)
tmp545 = tl.load(in_out_ptr9 + (x0), xmask)
tmp547 = tl.load(in_ptr137 + (x0), xmask)
tmp550 = tl.load(in_ptr138 + (x0), xmask)
tmp553 = tl.load(in_ptr139 + (x0), xmask)
tmp560 = tl.load(in_out_ptr10 + (x0), xmask)
tmp562 = tl.load(in_ptr140 + (x0), xmask)
tmp565 = tl.load(in_ptr141 + (x0), xmask)
tmp568 = tl.load(in_ptr142 + (x0), xmask)
tmp575 = tl.load(in_out_ptr11 + (x0), xmask)
tmp577 = tl.load(in_ptr143 + (x0), xmask)
tmp580 = tl.load(in_ptr144 + (x0), xmask)
tmp583 = tl.load(in_ptr145 + (x0), xmask)
tmp590 = tl.load(in_out_ptr12 + (x0), xmask)
tmp592 = tl.load(in_ptr146 + (x0), xmask)
tmp595 = tl.load(in_ptr147 + (x0), xmask)
tmp598 = tl.load(in_ptr148 + (x0), xmask)
tmp605 = tl.load(in_out_ptr13 + (x0), xmask)
tmp607 = tl.load(in_ptr149 + (x0), xmask)
tmp610 = tl.load(in_ptr150 + (x0), xmask)
tmp613 = tl.load(in_ptr151 + (x0), xmask)
tmp620 = tl.load(in_out_ptr14 + (x0), xmask)
tmp622 = tl.load(in_ptr152 + (x0), xmask)
tmp625 = tl.load(in_ptr153 + (x0), xmask)
tmp628 = tl.load(in_ptr154 + (x0), xmask)
tmp635 = tl.load(in_out_ptr15 + (x0), xmask)
tmp637 = tl.load(in_ptr155 + (x0), xmask)
tmp640 = tl.load(in_ptr156 + (x0), xmask)
tmp643 = tl.load(in_ptr157 + (x0), xmask)
tmp650 = tl.load(in_out_ptr16 + (x0), xmask)
tmp652 = tl.load(in_ptr158 + (x0), xmask)
tmp655 = tl.load(in_ptr159 + (x0), xmask)
tmp658 = tl.load(in_ptr160 + (x0), xmask)
tmp665 = tl.load(in_out_ptr17 + (x0), xmask)
tmp667 = tl.load(in_ptr161 + (x0), xmask)
tmp670 = tl.load(in_ptr162 + (x0), xmask)
tmp673 = tl.load(in_ptr163 + (x0), xmask)
tmp680 = tl.load(in_out_ptr18 + (x0), xmask)
tmp682 = tl.load(in_ptr164 + (x0), xmask)
tmp685 = tl.load(in_ptr165 + (x0), xmask)
tmp688 = tl.load(in_ptr166 + (x0), xmask)
tmp695 = tl.load(in_out_ptr19 + (x0), xmask)
tmp697 = tl.load(in_ptr167 + (x0), xmask)
tmp700 = tl.load(in_ptr168 + (x0), xmask)
tmp703 = tl.load(in_ptr169 + (x0), xmask)
tmp710 = tl.load(in_out_ptr20 + (x0), xmask)
tmp712 = tl.load(in_ptr170 + (x0), xmask)
tmp715 = tl.load(in_ptr171 + (x0), xmask)
tmp718 = tl.load(in_ptr172 + (x0), xmask)
tmp725 = tl.load(in_out_ptr21 + (x0), xmask)
tmp727 = tl.load(in_ptr173 + (x0), xmask)
tmp730 = tl.load(in_ptr174 + (x0), xmask)
tmp733 = tl.load(in_ptr175 + (x0), xmask)
tmp740 = tl.load(in_out_ptr22 + (x0), xmask)
tmp742 = tl.load(in_ptr176 + (x0), xmask)
tmp745 = tl.load(in_ptr177 + (x0), xmask)
tmp748 = tl.load(in_ptr178 + (x0), xmask)
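# --- Annotation (hand-written; not emitted by Inductor) ----------------------
# The block of loads above follows a fixed layout: the numbered in_ptrN
# arguments hold the directional pooling results produced in call(), while
# the in_out_ptrN arguments appear to be recycled scratch buffers passed
# back into the kernel. Despite the many "in_out" names, only in_out_ptr22
# is written back -- see the single tl.store at the end of the kernel.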
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
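# Annotation: tmp19 completes the first instance of the recurring update
# unit. Reading the generated ops (this is inferred, not taken from the
# source model), each subsequent ~13-op block repeats, for newly loaded
# values a, b, c (negated) and d:
#     e   = relu(min(-a, -b, -c) - d)
#     acc = acc + relu(e - acc * e)
# starting from acc = relu(tmp0 - tmp1). min(-a, -b, -c) undoes the
# negation applied before the max-pools in call(), so each e is built from
# a min-pool (erosion) value.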
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
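# Annotation: tmp754 is the final accumulator; the store below is the
# kernel's only side effect, so every other in_out pointer is consumed
# purely as an input.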
tl.store(in_out_ptr22 + (x0), tmp754, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
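# --- Annotation (hand-written sketch; not emitted by Inductor) ---------------
# call() below repeatedly negates a tensor and max-pools the result. Since
# min_pool(x) == -max_pool(-x), each neg/max_pool3d pair is a 3-D min-pool,
# i.e. a grey-scale erosion along one axis. A minimal eager-mode sketch of
# one such directional min-pool, assuming the [3, 1, 1] window and
# [1, 0, 0] padding used in call(); the helper name is hypothetical and the
# function is not used by the generated code:
def _reference_min_pool3d_d(x):
    # Erode along the depth axis only: window of 3 in D, 1 in H and W.
    return -torch.nn.functional.max_pool3d(
        -x, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0))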
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, neg_2, neg_4, neg_6, neg_8, neg_10], Original ATen: [aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_neg_0.run(arg0_1, buf0, buf4, buf8, buf16, buf20, buf24, 256, grid=grid(256), stream=stream0)
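# Annotation: buf0..buf24 now hold six negated copies of arg0_1; the
# max_pool3d calls below turn them into directional min-pools via the
# min_pool(x) == -max_pool(-x) identity sketched above.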
# Topologically Sorted Source Nodes: [neg, max_pool3d], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1 = torch.ops.aten.max_pool3d_with_indices.default(buf0, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf0
buf2 = buf1[0]
del buf1
# Topologically Sorted Source Nodes: [neg_2, max_pool3d_1], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf5 = torch.ops.aten.max_pool3d_with_indices.default(buf4, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf4
buf6 = buf5[0]
del buf5
# Topologically Sorted Source Nodes: [neg_4, max_pool3d_2], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf9 = torch.ops.aten.max_pool3d_with_indices.default(buf8, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf8
buf10 = buf9[0]
del buf9
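# Annotation: buf2, buf6 and buf10 are the (still negated) pooling results
# for the [3, 1, 1], [1, 3, 1] and [1, 1, 3] windows, i.e. erosions along
# D, H and W. The padding matches each window, so all three keep the
# (4, 4, 4, 4) shape of the input.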
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [p1, p3, min_1, p2, min_2], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_1.run(buf12, buf2, buf6, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1, p3, min_1, p2, min_2, img1], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf13 = torch.ops.aten.max_pool3d_with_indices.default(buf12, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf14 = buf13[0]
del buf13
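# Annotation: buf14 is a 3x3x3 max-pool (dilation) of the element-wise
# minimum of the three directional erosions, which resembles a grey-scale
# morphological opening with a cross-shaped structuring element. This
# reading is inferred from the op sequence, not from the source model.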
# Topologically Sorted Source Nodes: [neg_6, max_pool3d_4], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf17 = torch.ops.aten.max_pool3d_with_indices.default(buf16, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf18 = buf17[0]
del buf17
# Topologically Sorted Source Nodes: [neg_8, max_pool3d_5], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf22 = buf21[0]
del buf21
# Topologically Sorted Source Nodes: [neg_10, max_pool3d_6], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf25 = torch.ops.aten.max_pool3d_with_indices.default(buf24, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf26 = buf25[0]
del buf25
buf28 = buf24; del buf24 # reuse
buf32 = buf20; del buf20 # reuse
buf36 = buf16; del buf16 # reuse
buf44 = buf12; del buf12 # reuse
buf48 = buf6; del buf6 # reuse
buf52 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_12, neg_14, neg_16, neg_18, neg_20, neg_22], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf18, buf26, buf22, buf28, buf32, buf36, buf44, buf48, buf52, 256, grid=grid(256), stream=stream0)
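# Annotation: triton_poi_fused_minimum_neg_2 fuses two jobs: it reduces the
# previous pooling results to the min/neg values (p1_1, p3_1, min_3, p2_1,
# img) and simultaneously emits six freshly negated buffers that feed the
# next round of pooling. The same neg -> pool -> minimum -> pool cycle then
# repeats below, apparently once per iteration of a loop in the source
# module.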
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_12, max_pool3d_7], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf29 = torch.ops.aten.max_pool3d_with_indices.default(buf28, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf28
buf30 = buf29[0]
del buf29
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_14, max_pool3d_8], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf33 = torch.ops.aten.max_pool3d_with_indices.default(buf32, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf32
buf34 = buf33[0]
del buf33
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_16, max_pool3d_9], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf37 = torch.ops.aten.max_pool3d_with_indices.default(buf36, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf36
buf38 = buf37[0]
del buf37
buf40 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [p1_2, p3_2, min_5, p2_2, min_6], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf40, buf38, buf34, 256, grid=grid(256), stream=stream0)
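# Annotation: minimum_neg_3 appears to play the same role as minimum_neg_1
# above -- an in-place min over the three directional erosions (here
# mutating buf40) ahead of the 3x3x3 dilation -- likely differing only in
# the argument order the fusion picked up.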
# Topologically Sorted Source Nodes: [p1_2, p3_2, min_5, p2_2, min_6, img1_1], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf41 = torch.ops.aten.max_pool3d_with_indices.default(buf40, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf42 = buf41[0]
del buf41
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_18, max_pool3d_11], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf45 = torch.ops.aten.max_pool3d_with_indices.default(buf44, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf46 = buf45[0]
del buf45
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_20, max_pool3d_12], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf49 = torch.ops.aten.max_pool3d_with_indices.default(buf48, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf50 = buf49[0]
del buf49
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, neg_22, max_pool3d_13], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf53 = torch.ops.aten.max_pool3d_with_indices.default(buf52, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf54 = buf53[0]
del buf53
buf56 = buf52; del buf52 # reuse
buf60 = buf48; del buf48 # reuse
buf64 = buf44; del buf44 # reuse
buf72 = buf40; del buf40 # reuse
buf76 = buf38; del buf38 # reuse
buf80 = buf34; del buf34 # reuse
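# Annotation: from this point on Inductor recycles the scratch buffers in a
# ping-pong pattern (note the "# reuse" tags above), so the whole pipeline
# runs in a fixed memory footprint no matter how many pooling rounds
# follow.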
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_24, neg_26, neg_28, neg_30, neg_32, neg_34], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf46, buf54, buf50, buf56, buf60, buf64, buf72, buf76, buf80, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_24, max_pool3d_14], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf57 = torch.ops.aten.max_pool3d_with_indices.default(buf56, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf56
buf58 = buf57[0]
del buf57
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_26, max_pool3d_15], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf61 = torch.ops.aten.max_pool3d_with_indices.default(buf60, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf60
buf62 = buf61[0]
del buf61
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_28, max_pool3d_16], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf65 = torch.ops.aten.max_pool3d_with_indices.default(buf64, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf64
buf66 = buf65[0]
del buf65
buf68 = buf58; del buf58 # reuse
# Topologically Sorted Source Nodes: [p1_4, p3_4, min_9, p2_4, min_10], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf68, buf66, buf62, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_4, p3_4, min_9, p2_4, min_10, img1_2], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf69 = torch.ops.aten.max_pool3d_with_indices.default(buf68, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf70 = buf69[0]
del buf69
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_30, max_pool3d_18], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf73 = torch.ops.aten.max_pool3d_with_indices.default(buf72, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf74 = buf73[0]
del buf73
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_32, max_pool3d_19], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf77 = torch.ops.aten.max_pool3d_with_indices.default(buf76, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf78 = buf77[0]
del buf77
# Topologically Sorted Source Nodes: [p1_3, p3_3, min_7, p2_3, img_1, neg_34, max_pool3d_20], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf81 = torch.ops.aten.max_pool3d_with_indices.default(buf80, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf82 = buf81[0]
del buf81
buf84 = buf80; del buf80 # reuse
buf88 = buf76; del buf76 # reuse
buf92 = buf72; del buf72 # reuse
buf100 = buf68; del buf68 # reuse
buf104 = buf66; del buf66 # reuse
buf108 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_36, neg_38, neg_40, neg_42, neg_44, neg_46], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf74, buf82, buf78, buf84, buf88, buf92, buf100, buf104, buf108, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_36, max_pool3d_21], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf85 = torch.ops.aten.max_pool3d_with_indices.default(buf84, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf84
buf86 = buf85[0]
del buf85
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_38, max_pool3d_22], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf89 = torch.ops.aten.max_pool3d_with_indices.default(buf88, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf88
buf90 = buf89[0]
del buf89
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_40, max_pool3d_23], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf93 = torch.ops.aten.max_pool3d_with_indices.default(buf92, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf92
buf94 = buf93[0]
del buf93
buf96 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [p1_6, p3_6, min_13, p2_6, min_14], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf96, buf94, buf90, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_6, p3_6, min_13, p2_6, min_14, img1_3], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf97 = torch.ops.aten.max_pool3d_with_indices.default(buf96, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf98 = buf97[0]
del buf97
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_42, max_pool3d_25], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf101 = torch.ops.aten.max_pool3d_with_indices.default(buf100, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf102 = buf101[0]
del buf101
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_44, max_pool3d_26], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf105 = torch.ops.aten.max_pool3d_with_indices.default(buf104, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf106 = buf105[0]
del buf105
# Topologically Sorted Source Nodes: [p1_5, p3_5, min_11, p2_5, img_2, neg_46, max_pool3d_27], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf109 = torch.ops.aten.max_pool3d_with_indices.default(buf108, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf110 = buf109[0]
del buf109
buf112 = buf108; del buf108 # reuse
buf116 = buf104; del buf104 # reuse
buf120 = buf100; del buf100 # reuse
buf128 = buf96; del buf96 # reuse
buf132 = buf94; del buf94 # reuse
buf136 = buf90; del buf90 # reuse
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_48, neg_50, neg_52, neg_54, neg_56, neg_58], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf102, buf110, buf106, buf112, buf116, buf120, buf128, buf132, buf136, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_48, max_pool3d_28], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf113 = torch.ops.aten.max_pool3d_with_indices.default(buf112, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf112
buf114 = buf113[0]
del buf113
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_50, max_pool3d_29], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf117 = torch.ops.aten.max_pool3d_with_indices.default(buf116, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf116
buf118 = buf117[0]
del buf117
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_52, max_pool3d_30], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf121 = torch.ops.aten.max_pool3d_with_indices.default(buf120, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf120
buf122 = buf121[0]
del buf121
buf124 = buf114; del buf114 # reuse
# Topologically Sorted Source Nodes: [p1_8, p3_8, min_17, p2_8, min_18], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf124, buf122, buf118, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_8, p3_8, min_17, p2_8, min_18, img1_4], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf125 = torch.ops.aten.max_pool3d_with_indices.default(buf124, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf126 = buf125[0]
del buf125
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_54, max_pool3d_32], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf129 = torch.ops.aten.max_pool3d_with_indices.default(buf128, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf130 = buf129[0]
del buf129
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_56, max_pool3d_33], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf133 = torch.ops.aten.max_pool3d_with_indices.default(buf132, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf134 = buf133[0]
del buf133
# Topologically Sorted Source Nodes: [p1_7, p3_7, min_15, p2_7, img_3, neg_58, max_pool3d_34], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf137 = torch.ops.aten.max_pool3d_with_indices.default(buf136, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf138 = buf137[0]
del buf137
buf140 = buf136; del buf136 # reuse
buf144 = buf132; del buf132 # reuse
buf148 = buf128; del buf128 # reuse
buf156 = buf124; del buf124 # reuse
buf160 = buf122; del buf122 # reuse
buf164 = buf118; del buf118 # reuse
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_60, neg_62, neg_64, neg_66, neg_68, neg_70], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf130, buf138, buf134, buf140, buf144, buf148, buf156, buf160, buf164, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_60, max_pool3d_35], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf141 = torch.ops.aten.max_pool3d_with_indices.default(buf140, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf140
buf142 = buf141[0]
del buf141
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_62, max_pool3d_36], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf145 = torch.ops.aten.max_pool3d_with_indices.default(buf144, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf144
buf146 = buf145[0]
del buf145
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_64, max_pool3d_37], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf149 = torch.ops.aten.max_pool3d_with_indices.default(buf148, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf148
buf150 = buf149[0]
del buf149
buf152 = buf142; del buf142 # reuse
# Topologically Sorted Source Nodes: [p1_10, p3_10, min_21, p2_10, min_22], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf152, buf150, buf146, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_10, p3_10, min_21, p2_10, min_22, img1_5], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf153 = torch.ops.aten.max_pool3d_with_indices.default(buf152, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf154 = buf153[0]
del buf153
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_66, max_pool3d_39], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf157 = torch.ops.aten.max_pool3d_with_indices.default(buf156, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf158 = buf157[0]
del buf157
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_68, max_pool3d_40], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf161 = torch.ops.aten.max_pool3d_with_indices.default(buf160, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf162 = buf161[0]
del buf161
# Topologically Sorted Source Nodes: [p1_9, p3_9, min_19, p2_9, img_4, neg_70, max_pool3d_41], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf165 = torch.ops.aten.max_pool3d_with_indices.default(buf164, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf166 = buf165[0]
del buf165
buf168 = buf164; del buf164 # reuse
buf172 = buf160; del buf160 # reuse
buf176 = buf156; del buf156 # reuse
buf184 = buf152; del buf152 # reuse
buf188 = buf150; del buf150 # reuse
buf192 = buf146; del buf146 # reuse
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_72, neg_74, neg_76, neg_78, neg_80, neg_82], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf158, buf166, buf162, buf168, buf172, buf176, buf184, buf188, buf192, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_72, max_pool3d_42], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf169 = torch.ops.aten.max_pool3d_with_indices.default(buf168, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf168
buf170 = buf169[0]
del buf169
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_74, max_pool3d_43], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf173 = torch.ops.aten.max_pool3d_with_indices.default(buf172, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf172
buf174 = buf173[0]
del buf173
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_76, max_pool3d_44], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf177 = torch.ops.aten.max_pool3d_with_indices.default(buf176, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf176
buf178 = buf177[0]
del buf177
buf180 = buf170; del buf170 # reuse
# Topologically Sorted Source Nodes: [p1_12, p3_12, min_25, p2_12, min_26], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf180, buf178, buf174, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_12, p3_12, min_25, p2_12, min_26, img1_6], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf181 = torch.ops.aten.max_pool3d_with_indices.default(buf180, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf182 = buf181[0]
del buf181
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_78, max_pool3d_46], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf185 = torch.ops.aten.max_pool3d_with_indices.default(buf184, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf186 = buf185[0]
del buf185
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_80, max_pool3d_47], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf189 = torch.ops.aten.max_pool3d_with_indices.default(buf188, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf190 = buf189[0]
del buf189
# Topologically Sorted Source Nodes: [p1_11, p3_11, min_23, p2_11, img_5, neg_82, max_pool3d_48], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf193 = torch.ops.aten.max_pool3d_with_indices.default(buf192, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf194 = buf193[0]
del buf193
buf196 = buf192; del buf192 # reuse
buf200 = buf188; del buf188 # reuse
buf204 = buf184; del buf184 # reuse
buf212 = buf180; del buf180 # reuse
buf216 = buf178; del buf178 # reuse
buf220 = buf174; del buf174 # reuse
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_84, neg_86, neg_88, neg_90, neg_92, neg_94], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf186, buf194, buf190, buf196, buf200, buf204, buf212, buf216, buf220, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_84, max_pool3d_49], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf197 = torch.ops.aten.max_pool3d_with_indices.default(buf196, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf196
buf198 = buf197[0]
del buf197
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_86, max_pool3d_50], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf201 = torch.ops.aten.max_pool3d_with_indices.default(buf200, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf200
buf202 = buf201[0]
del buf201
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_88, max_pool3d_51], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf205 = torch.ops.aten.max_pool3d_with_indices.default(buf204, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf204
buf206 = buf205[0]
del buf205
buf208 = buf198; del buf198 # reuse
# Topologically Sorted Source Nodes: [p1_14, p3_14, min_29, p2_14, min_30], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf208, buf206, buf202, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_14, p3_14, min_29, p2_14, min_30, img1_7], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf209 = torch.ops.aten.max_pool3d_with_indices.default(buf208, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf210 = buf209[0]
del buf209
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_90, max_pool3d_53], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf213 = torch.ops.aten.max_pool3d_with_indices.default(buf212, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf214 = buf213[0]
del buf213
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_92, max_pool3d_54], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf217 = torch.ops.aten.max_pool3d_with_indices.default(buf216, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf218 = buf217[0]
del buf217
# Topologically Sorted Source Nodes: [p1_13, p3_13, min_27, p2_13, img_6, neg_94, max_pool3d_55], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf221 = torch.ops.aten.max_pool3d_with_indices.default(buf220, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf222 = buf221[0]
del buf221
buf224 = buf220; del buf220 # reuse
buf228 = buf216; del buf216 # reuse
buf232 = buf212; del buf212 # reuse
buf240 = buf208; del buf208 # reuse
buf244 = buf206; del buf206 # reuse
buf248 = buf202; del buf202 # reuse
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_96, neg_98, neg_100, neg_102, neg_104, neg_106], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf214, buf222, buf218, buf224, buf228, buf232, buf240, buf244, buf248, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_96, max_pool3d_56], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf225 = torch.ops.aten.max_pool3d_with_indices.default(buf224, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf224
buf226 = buf225[0]
del buf225
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_98, max_pool3d_57], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf229 = torch.ops.aten.max_pool3d_with_indices.default(buf228, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf228
buf230 = buf229[0]
del buf229
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_100, max_pool3d_58], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf233 = torch.ops.aten.max_pool3d_with_indices.default(buf232, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf232
buf234 = buf233[0]
del buf233
buf236 = buf226; del buf226 # reuse
# Topologically Sorted Source Nodes: [p1_16, p3_16, min_33, p2_16, min_34], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf236, buf234, buf230, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_16, p3_16, min_33, p2_16, min_34, img1_8], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf237 = torch.ops.aten.max_pool3d_with_indices.default(buf236, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf238 = buf237[0]
del buf237
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_102, max_pool3d_60], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf241 = torch.ops.aten.max_pool3d_with_indices.default(buf240, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf242 = buf241[0]
del buf241
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_104, max_pool3d_61], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf245 = torch.ops.aten.max_pool3d_with_indices.default(buf244, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf246 = buf245[0]
del buf245
# Topologically Sorted Source Nodes: [p1_15, p3_15, min_31, p2_15, img_7, neg_106, max_pool3d_62], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf249 = torch.ops.aten.max_pool3d_with_indices.default(buf248, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf250 = buf249[0]
del buf249
buf252 = buf248; del buf248 # reuse
buf256 = buf244; del buf244 # reuse
buf260 = buf240; del buf240 # reuse
buf268 = buf236; del buf236 # reuse
buf272 = buf234; del buf234 # reuse
buf276 = buf230; del buf230 # reuse
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_108, neg_110, neg_112, neg_114, neg_116, neg_118], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf242, buf250, buf246, buf252, buf256, buf260, buf268, buf272, buf276, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_108, max_pool3d_63], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf253 = torch.ops.aten.max_pool3d_with_indices.default(buf252, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf252
buf254 = buf253[0]
del buf253
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_110, max_pool3d_64], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf257 = torch.ops.aten.max_pool3d_with_indices.default(buf256, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf256
buf258 = buf257[0]
del buf257
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_112, max_pool3d_65], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf261 = torch.ops.aten.max_pool3d_with_indices.default(buf260, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf260
buf262 = buf261[0]
del buf261
buf264 = buf254; del buf254 # reuse
# Topologically Sorted Source Nodes: [p1_18, p3_18, min_37, p2_18, min_38], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf264, buf262, buf258, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_18, p3_18, min_37, p2_18, min_38, img1_9], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf265 = torch.ops.aten.max_pool3d_with_indices.default(buf264, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf266 = buf265[0]
del buf265
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_114, max_pool3d_67], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf269 = torch.ops.aten.max_pool3d_with_indices.default(buf268, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf270 = buf269[0]
del buf269
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_116, max_pool3d_68], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf273 = torch.ops.aten.max_pool3d_with_indices.default(buf272, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf274 = buf273[0]
del buf273
# Topologically Sorted Source Nodes: [p1_17, p3_17, min_35, p2_17, img_8, neg_118, max_pool3d_69], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf277 = torch.ops.aten.max_pool3d_with_indices.default(buf276, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf278 = buf277[0]
del buf277
buf280 = buf276; del buf276 # reuse
buf284 = buf272; del buf272 # reuse
buf288 = buf268; del buf268 # reuse
buf296 = buf264; del buf264 # reuse
buf300 = buf262; del buf262 # reuse
buf304 = buf258; del buf258 # reuse
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_120, neg_122, neg_124, neg_126, neg_128, neg_130], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf270, buf278, buf274, buf280, buf284, buf288, buf296, buf300, buf304, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_120, max_pool3d_70], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf281 = torch.ops.aten.max_pool3d_with_indices.default(buf280, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf280
buf282 = buf281[0]
del buf281
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_122, max_pool3d_71], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf285 = torch.ops.aten.max_pool3d_with_indices.default(buf284, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf284
buf286 = buf285[0]
del buf285
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_124, max_pool3d_72], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf289 = torch.ops.aten.max_pool3d_with_indices.default(buf288, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf288
buf290 = buf289[0]
del buf289
buf292 = buf282; del buf282 # reuse
# Topologically Sorted Source Nodes: [p1_20, p3_20, min_41, p2_20, min_42], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf292, buf290, buf286, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_20, p3_20, min_41, p2_20, min_42, img1_10], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf293 = torch.ops.aten.max_pool3d_with_indices.default(buf292, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf294 = buf293[0]
del buf293
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_126, max_pool3d_74], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf297 = torch.ops.aten.max_pool3d_with_indices.default(buf296, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf298 = buf297[0]
del buf297
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_128, max_pool3d_75], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf301 = torch.ops.aten.max_pool3d_with_indices.default(buf300, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf302 = buf301[0]
del buf301
# Topologically Sorted Source Nodes: [p1_19, p3_19, min_39, p2_19, img_9, neg_130, max_pool3d_76], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf305 = torch.ops.aten.max_pool3d_with_indices.default(buf304, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf306 = buf305[0]
del buf305
buf308 = buf304; del buf304 # reuse
buf312 = buf300; del buf300 # reuse
buf316 = buf296; del buf296 # reuse
buf324 = buf292; del buf292 # reuse
buf328 = buf290; del buf290 # reuse
buf332 = buf286; del buf286 # reuse
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_132, neg_134, neg_136, neg_138, neg_140, neg_142], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf298, buf306, buf302, buf308, buf312, buf316, buf324, buf328, buf332, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_132, max_pool3d_77], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf309 = torch.ops.aten.max_pool3d_with_indices.default(buf308, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf308
buf310 = buf309[0]
del buf309
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_134, max_pool3d_78], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf313 = torch.ops.aten.max_pool3d_with_indices.default(buf312, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf312
buf314 = buf313[0]
del buf313
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_136, max_pool3d_79], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf317 = torch.ops.aten.max_pool3d_with_indices.default(buf316, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf316
buf318 = buf317[0]
del buf317
buf320 = buf310; del buf310 # reuse
# Topologically Sorted Source Nodes: [p1_22, p3_22, min_45, p2_22, min_46], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf320, buf318, buf314, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_22, p3_22, min_45, p2_22, min_46, img1_11], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf321 = torch.ops.aten.max_pool3d_with_indices.default(buf320, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf322 = buf321[0]
del buf321
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_138, max_pool3d_81], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf325 = torch.ops.aten.max_pool3d_with_indices.default(buf324, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf326 = buf325[0]
del buf325
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_140, max_pool3d_82], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf329 = torch.ops.aten.max_pool3d_with_indices.default(buf328, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf330 = buf329[0]
del buf329
# Topologically Sorted Source Nodes: [p1_21, p3_21, min_43, p2_21, img_10, neg_142, max_pool3d_83], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf333 = torch.ops.aten.max_pool3d_with_indices.default(buf332, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf334 = buf333[0]
del buf333
buf336 = buf332; del buf332 # reuse
buf340 = buf328; del buf328 # reuse
buf344 = buf324; del buf324 # reuse
buf352 = buf320; del buf320 # reuse
buf356 = buf318; del buf318 # reuse
buf360 = buf314; del buf314 # reuse
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_144, neg_146, neg_148, neg_150, neg_152, neg_154], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf326, buf334, buf330, buf336, buf340, buf344, buf352, buf356, buf360, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_144, max_pool3d_84], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf337 = torch.ops.aten.max_pool3d_with_indices.default(buf336, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf336
buf338 = buf337[0]
del buf337
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_146, max_pool3d_85], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf341 = torch.ops.aten.max_pool3d_with_indices.default(buf340, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf340
buf342 = buf341[0]
del buf341
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_148, max_pool3d_86], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf345 = torch.ops.aten.max_pool3d_with_indices.default(buf344, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf344
buf346 = buf345[0]
del buf345
buf348 = buf338; del buf338 # reuse
# Topologically Sorted Source Nodes: [p1_24, p3_24, min_49, p2_24, min_50], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf348, buf346, buf342, 256, grid=grid(256), stream=stream0)
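# triton_poi_fused_minimum_neg_3 uses its first argument as both input and
# output (buf348 is written in place, then consumed by the pool below),
# overwriting it with the pointwise minimum of the three negated pool
# results. Roughly, in eager terms (buffer names illustrative):
#
#   buf348 = torch.minimum(torch.minimum(-buf348, -buf346), -buf342)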
# Topologically Sorted Source Nodes: [p1_24, p3_24, min_49, p2_24, min_50, img1_12], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf349 = torch.ops.aten.max_pool3d_with_indices.default(buf348, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf350 = buf349[0]
del buf349
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_150, max_pool3d_88], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf353 = torch.ops.aten.max_pool3d_with_indices.default(buf352, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf354 = buf353[0]
del buf353
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_152, max_pool3d_89], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf357 = torch.ops.aten.max_pool3d_with_indices.default(buf356, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf358 = buf357[0]
del buf357
# Topologically Sorted Source Nodes: [p1_23, p3_23, min_47, p2_23, img_11, neg_154, max_pool3d_90], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf361 = torch.ops.aten.max_pool3d_with_indices.default(buf360, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf362 = buf361[0]
del buf361
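# One unrolled iteration of the source loop is now complete. A compact
# eager-mode reading of the recurrence (a sketch assuming the trace unrolls
# a fixed-count Python loop; note the trace pools each negated image twice,
# once per consumer branch, where this sketch pools it once):
#
#   import torch
#   import torch.nn.functional as F
#
#   def step(img):
#       neg = -img
#       p1 = F.max_pool3d(neg, (3, 1, 1), 1, (1, 0, 0))
#       p2 = F.max_pool3d(neg, (1, 3, 1), 1, (0, 1, 0))
#       p3 = F.max_pool3d(neg, (1, 1, 3), 1, (0, 0, 1))
#       img = torch.minimum(torch.minimum(-p1, -p2), -p3)   # directional min-pool
#       img1 = F.max_pool3d(img, 3, 1, 1)                   # cubic dilation
#       return img, img1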
buf364 = buf360; del buf360 # reuse
buf368 = buf356; del buf356 # reuse
buf372 = buf352; del buf352 # reuse
buf380 = buf348; del buf348 # reuse
buf384 = buf346; del buf346 # reuse
buf388 = buf342; del buf342 # reuse
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_156, neg_158, neg_160, neg_162, neg_164, neg_166], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf354, buf362, buf358, buf364, buf368, buf372, buf380, buf384, buf388, 256, grid=grid(256), stream=stream0)
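# A single launch of triton_poi_fused_minimum_neg_2 reads the three pooled
# buffers (buf354, buf362, buf358), reconstructs img_12 as the minimum of
# their negations, and fans out -img_12 to all six destination buffers, one
# per max_pool3d call that follows, replacing what would otherwise be
# several separate pointwise kernels. Roughly (names illustrative):
#
#   img_k = torch.minimum(torch.minimum(-p1, -p2), -p3)
#   for dst in (buf364, buf368, buf372, buf380, buf384, buf388):
#       dst.copy_(-img_k)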
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_156, max_pool3d_91], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf365 = torch.ops.aten.max_pool3d_with_indices.default(buf364, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf364
buf366 = buf365[0]
del buf365
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_158, max_pool3d_92], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf369 = torch.ops.aten.max_pool3d_with_indices.default(buf368, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf368
buf370 = buf369[0]
del buf369
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_160, max_pool3d_93], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf373 = torch.ops.aten.max_pool3d_with_indices.default(buf372, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf372
buf374 = buf373[0]
del buf373
buf376 = buf366; del buf366 # reuse
# Topologically Sorted Source Nodes: [p1_26, p3_26, min_53, p2_26, min_54], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf376, buf374, buf370, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_26, p3_26, min_53, p2_26, min_54, img1_13], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf377 = torch.ops.aten.max_pool3d_with_indices.default(buf376, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf378 = buf377[0]
del buf377
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_162, max_pool3d_95], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf381 = torch.ops.aten.max_pool3d_with_indices.default(buf380, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf382 = buf381[0]
del buf381
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_164, max_pool3d_96], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf385 = torch.ops.aten.max_pool3d_with_indices.default(buf384, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf386 = buf385[0]
del buf385
# Topologically Sorted Source Nodes: [p1_25, p3_25, min_51, p2_25, img_12, neg_166, max_pool3d_97], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf389 = torch.ops.aten.max_pool3d_with_indices.default(buf388, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf390 = buf389[0]
del buf389
buf392 = buf388; del buf388 # reuse
buf396 = buf384; del buf384 # reuse
buf400 = buf380; del buf380 # reuse
buf408 = buf376; del buf376 # reuse
buf412 = buf374; del buf374 # reuse
buf416 = buf370; del buf370 # reuse
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_168, neg_170, neg_172, neg_174, neg_176, neg_178], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf382, buf390, buf386, buf392, buf396, buf400, buf408, buf412, buf416, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_168, max_pool3d_98], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf393 = torch.ops.aten.max_pool3d_with_indices.default(buf392, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf392
buf394 = buf393[0]
del buf393
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_170, max_pool3d_99], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf397 = torch.ops.aten.max_pool3d_with_indices.default(buf396, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf396
buf398 = buf397[0]
del buf397
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_172, max_pool3d_100], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf401 = torch.ops.aten.max_pool3d_with_indices.default(buf400, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf400
buf402 = buf401[0]
del buf401
buf404 = buf394; del buf394 # reuse
# Topologically Sorted Source Nodes: [p1_28, p3_28, min_57, p2_28, min_58], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf404, buf402, buf398, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_28, p3_28, min_57, p2_28, min_58, img1_14], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf405 = torch.ops.aten.max_pool3d_with_indices.default(buf404, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf406 = buf405[0]
del buf405
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_174, max_pool3d_102], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf409 = torch.ops.aten.max_pool3d_with_indices.default(buf408, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf410 = buf409[0]
del buf409
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_176, max_pool3d_103], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf413 = torch.ops.aten.max_pool3d_with_indices.default(buf412, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf414 = buf413[0]
del buf413
# Topologically Sorted Source Nodes: [p1_27, p3_27, min_55, p2_27, img_13, neg_178, max_pool3d_104], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf417 = torch.ops.aten.max_pool3d_with_indices.default(buf416, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf418 = buf417[0]
del buf417
buf420 = buf416; del buf416 # reuse
buf424 = buf412; del buf412 # reuse
buf428 = buf408; del buf408 # reuse
buf436 = buf404; del buf404 # reuse
buf440 = buf402; del buf402 # reuse
buf444 = buf398; del buf398 # reuse
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_180, neg_182, neg_184, neg_186, neg_188, neg_190], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf410, buf418, buf414, buf420, buf424, buf428, buf436, buf440, buf444, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_180, max_pool3d_105], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf421 = torch.ops.aten.max_pool3d_with_indices.default(buf420, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf420
buf422 = buf421[0]
del buf421
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_182, max_pool3d_106], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf425 = torch.ops.aten.max_pool3d_with_indices.default(buf424, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf424
buf426 = buf425[0]
del buf425
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_184, max_pool3d_107], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf429 = torch.ops.aten.max_pool3d_with_indices.default(buf428, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf428
buf430 = buf429[0]
del buf429
buf432 = buf422; del buf422 # reuse
# Topologically Sorted Source Nodes: [p1_30, p3_30, min_61, p2_30, min_62], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf432, buf430, buf426, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_30, p3_30, min_61, p2_30, min_62, img1_15], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf433 = torch.ops.aten.max_pool3d_with_indices.default(buf432, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf434 = buf433[0]
del buf433
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_186, max_pool3d_109], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf437 = torch.ops.aten.max_pool3d_with_indices.default(buf436, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf438 = buf437[0]
del buf437
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_188, max_pool3d_110], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf441 = torch.ops.aten.max_pool3d_with_indices.default(buf440, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf442 = buf441[0]
del buf441
# Topologically Sorted Source Nodes: [p1_29, p3_29, min_59, p2_29, img_14, neg_190, max_pool3d_111], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf445 = torch.ops.aten.max_pool3d_with_indices.default(buf444, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf446 = buf445[0]
del buf445
buf448 = buf444; del buf444 # reuse
buf452 = buf440; del buf440 # reuse
buf456 = buf436; del buf436 # reuse
buf464 = buf432; del buf432 # reuse
buf468 = buf430; del buf430 # reuse
buf472 = buf426; del buf426 # reuse
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_192, neg_194, neg_196, neg_198, neg_200, neg_202], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf438, buf446, buf442, buf448, buf452, buf456, buf464, buf468, buf472, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_192, max_pool3d_112], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf449 = torch.ops.aten.max_pool3d_with_indices.default(buf448, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf448
buf450 = buf449[0]
del buf449
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_194, max_pool3d_113], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf453 = torch.ops.aten.max_pool3d_with_indices.default(buf452, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf452
buf454 = buf453[0]
del buf453
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_196, max_pool3d_114], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf457 = torch.ops.aten.max_pool3d_with_indices.default(buf456, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf456
buf458 = buf457[0]
del buf457
buf460 = buf450; del buf450 # reuse
# Topologically Sorted Source Nodes: [p1_32, p3_32, min_65, p2_32, min_66], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf460, buf458, buf454, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_32, p3_32, min_65, p2_32, min_66, img1_16], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf461 = torch.ops.aten.max_pool3d_with_indices.default(buf460, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf462 = buf461[0]
del buf461
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_198, max_pool3d_116], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf465 = torch.ops.aten.max_pool3d_with_indices.default(buf464, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf466 = buf465[0]
del buf465
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_200, max_pool3d_117], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf469 = torch.ops.aten.max_pool3d_with_indices.default(buf468, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf470 = buf469[0]
del buf469
# Topologically Sorted Source Nodes: [p1_31, p3_31, min_63, p2_31, img_15, neg_202, max_pool3d_118], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf473 = torch.ops.aten.max_pool3d_with_indices.default(buf472, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf474 = buf473[0]
del buf473
buf476 = buf472; del buf472 # reuse
buf480 = buf468; del buf468 # reuse
buf484 = buf464; del buf464 # reuse
buf492 = buf460; del buf460 # reuse
buf496 = buf458; del buf458 # reuse
buf500 = buf454; del buf454 # reuse
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_204, neg_206, neg_208, neg_210, neg_212, neg_214], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf466, buf474, buf470, buf476, buf480, buf484, buf492, buf496, buf500, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_204, max_pool3d_119], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf477 = torch.ops.aten.max_pool3d_with_indices.default(buf476, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf476
buf478 = buf477[0]
del buf477
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_206, max_pool3d_120], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf481 = torch.ops.aten.max_pool3d_with_indices.default(buf480, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf480
buf482 = buf481[0]
del buf481
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_208, max_pool3d_121], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf485 = torch.ops.aten.max_pool3d_with_indices.default(buf484, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf484
buf486 = buf485[0]
del buf485
buf488 = buf478; del buf478 # reuse
# Topologically Sorted Source Nodes: [p1_34, p3_34, min_69, p2_34, min_70], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf488, buf486, buf482, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_34, p3_34, min_69, p2_34, min_70, img1_17], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf489 = torch.ops.aten.max_pool3d_with_indices.default(buf488, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf490 = buf489[0]
del buf489
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_210, max_pool3d_123], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf493 = torch.ops.aten.max_pool3d_with_indices.default(buf492, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf494 = buf493[0]
del buf493
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_212, max_pool3d_124], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf497 = torch.ops.aten.max_pool3d_with_indices.default(buf496, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf498 = buf497[0]
del buf497
# Topologically Sorted Source Nodes: [p1_33, p3_33, min_67, p2_33, img_16, neg_214, max_pool3d_125], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf501 = torch.ops.aten.max_pool3d_with_indices.default(buf500, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf502 = buf501[0]
del buf501
buf504 = buf500; del buf500 # reuse
buf508 = buf496; del buf496 # reuse
buf512 = buf492; del buf492 # reuse
buf520 = buf488; del buf488 # reuse
buf524 = buf486; del buf486 # reuse
buf528 = buf482; del buf482 # reuse
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_216, neg_218, neg_220, neg_222, neg_224, neg_226], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf494, buf502, buf498, buf504, buf508, buf512, buf520, buf524, buf528, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_216, max_pool3d_126], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf505 = torch.ops.aten.max_pool3d_with_indices.default(buf504, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf504
buf506 = buf505[0]
del buf505
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_218, max_pool3d_127], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf509 = torch.ops.aten.max_pool3d_with_indices.default(buf508, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf508
buf510 = buf509[0]
del buf509
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_220, max_pool3d_128], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf513 = torch.ops.aten.max_pool3d_with_indices.default(buf512, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf512
buf514 = buf513[0]
del buf513
buf516 = buf506; del buf506 # reuse
# Topologically Sorted Source Nodes: [p1_36, p3_36, min_73, p2_36, min_74], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf516, buf514, buf510, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_36, p3_36, min_73, p2_36, min_74, img1_18], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf517 = torch.ops.aten.max_pool3d_with_indices.default(buf516, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf518 = buf517[0]
del buf517
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_222, max_pool3d_130], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf521 = torch.ops.aten.max_pool3d_with_indices.default(buf520, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf522 = buf521[0]
del buf521
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_224, max_pool3d_131], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf525 = torch.ops.aten.max_pool3d_with_indices.default(buf524, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf526 = buf525[0]
del buf525
# Topologically Sorted Source Nodes: [p1_35, p3_35, min_71, p2_35, img_17, neg_226, max_pool3d_132], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf529 = torch.ops.aten.max_pool3d_with_indices.default(buf528, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf530 = buf529[0]
del buf529
buf532 = buf528; del buf528 # reuse
buf536 = buf524; del buf524 # reuse
buf540 = buf520; del buf520 # reuse
buf548 = buf516; del buf516 # reuse
buf552 = buf514; del buf514 # reuse
buf556 = buf510; del buf510 # reuse
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_228, neg_230, neg_232, neg_234, neg_236, neg_238], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf522, buf530, buf526, buf532, buf536, buf540, buf548, buf552, buf556, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_228, max_pool3d_133], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf533 = torch.ops.aten.max_pool3d_with_indices.default(buf532, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf532
buf534 = buf533[0]
del buf533
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_230, max_pool3d_134], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf537 = torch.ops.aten.max_pool3d_with_indices.default(buf536, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf536
buf538 = buf537[0]
del buf537
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_232, max_pool3d_135], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf541 = torch.ops.aten.max_pool3d_with_indices.default(buf540, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf540
buf542 = buf541[0]
del buf541
buf544 = buf534; del buf534 # reuse
# Topologically Sorted Source Nodes: [p1_38, p3_38, min_77, p2_38, min_78], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf544, buf542, buf538, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_38, p3_38, min_77, p2_38, min_78, img1_19], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf545 = torch.ops.aten.max_pool3d_with_indices.default(buf544, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf546 = buf545[0]
del buf545
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_234, max_pool3d_137], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf549 = torch.ops.aten.max_pool3d_with_indices.default(buf548, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf550 = buf549[0]
del buf549
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_236, max_pool3d_138], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf553 = torch.ops.aten.max_pool3d_with_indices.default(buf552, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf554 = buf553[0]
del buf553
# Topologically Sorted Source Nodes: [p1_37, p3_37, min_75, p2_37, img_18, neg_238, max_pool3d_139], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf557 = torch.ops.aten.max_pool3d_with_indices.default(buf556, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf558 = buf557[0]
del buf557
buf560 = buf556; del buf556 # reuse
buf564 = buf552; del buf552 # reuse
buf568 = buf548; del buf548 # reuse
buf576 = buf544; del buf544 # reuse
buf580 = buf542; del buf542 # reuse
buf584 = buf538; del buf538 # reuse
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_240, neg_242, neg_244, neg_246, neg_248, neg_250], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf550, buf558, buf554, buf560, buf564, buf568, buf576, buf580, buf584, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_240, max_pool3d_140], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf561 = torch.ops.aten.max_pool3d_with_indices.default(buf560, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf560
buf562 = buf561[0]
del buf561
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_242, max_pool3d_141], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf565 = torch.ops.aten.max_pool3d_with_indices.default(buf564, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf564
buf566 = buf565[0]
del buf565
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_244, max_pool3d_142], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf569 = torch.ops.aten.max_pool3d_with_indices.default(buf568, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf568
buf570 = buf569[0]
del buf569
buf572 = buf562; del buf562 # reuse
# Topologically Sorted Source Nodes: [p1_40, p3_40, min_81, p2_40, min_82], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf572, buf570, buf566, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_40, p3_40, min_81, p2_40, min_82, img1_20], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf573 = torch.ops.aten.max_pool3d_with_indices.default(buf572, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf574 = buf573[0]
del buf573
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_246, max_pool3d_144], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf577 = torch.ops.aten.max_pool3d_with_indices.default(buf576, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf578 = buf577[0]
del buf577
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_248, max_pool3d_145], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf581 = torch.ops.aten.max_pool3d_with_indices.default(buf580, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf582 = buf581[0]
del buf581
# Topologically Sorted Source Nodes: [p1_39, p3_39, min_79, p2_39, img_19, neg_250, max_pool3d_146], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf585 = torch.ops.aten.max_pool3d_with_indices.default(buf584, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf586 = buf585[0]
del buf585
buf588 = buf584; del buf584 # reuse
buf592 = buf580; del buf580 # reuse
buf596 = buf576; del buf576 # reuse
buf604 = buf572; del buf572 # reuse
buf608 = buf570; del buf570 # reuse
buf612 = buf566; del buf566 # reuse
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_252, neg_254, neg_256, neg_258, neg_260, neg_262], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf578, buf586, buf582, buf588, buf592, buf596, buf604, buf608, buf612, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_252, max_pool3d_147], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf589 = torch.ops.aten.max_pool3d_with_indices.default(buf588, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf588
buf590 = buf589[0]
del buf589
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_254, max_pool3d_148], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf593 = torch.ops.aten.max_pool3d_with_indices.default(buf592, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf592
buf594 = buf593[0]
del buf593
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_256, max_pool3d_149], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf597 = torch.ops.aten.max_pool3d_with_indices.default(buf596, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf596
buf598 = buf597[0]
del buf597
buf600 = buf590; del buf590 # reuse
# Topologically Sorted Source Nodes: [p1_42, p3_42, min_85, p2_42, min_86], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf600, buf598, buf594, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_42, p3_42, min_85, p2_42, min_86, img1_21], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf601 = torch.ops.aten.max_pool3d_with_indices.default(buf600, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf602 = buf601[0]
del buf601
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_258, max_pool3d_151], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf605 = torch.ops.aten.max_pool3d_with_indices.default(buf604, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf606 = buf605[0]
del buf605
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_260, max_pool3d_152], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf609 = torch.ops.aten.max_pool3d_with_indices.default(buf608, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf610 = buf609[0]
del buf609
# Topologically Sorted Source Nodes: [p1_41, p3_41, min_83, p2_41, img_20, neg_262, max_pool3d_153], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf613 = torch.ops.aten.max_pool3d_with_indices.default(buf612, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf614 = buf613[0]
del buf613
buf616 = buf612; del buf612 # reuse
buf620 = buf608; del buf608 # reuse
buf624 = buf604; del buf604 # reuse
buf632 = buf600; del buf600 # reuse
buf636 = buf598; del buf598 # reuse
buf640 = buf594; del buf594 # reuse
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_264, neg_266, neg_268, neg_270, neg_272, neg_274], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf606, buf614, buf610, buf616, buf620, buf624, buf632, buf636, buf640, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_264, max_pool3d_154], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf617 = torch.ops.aten.max_pool3d_with_indices.default(buf616, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf616
buf618 = buf617[0]
del buf617
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_266, max_pool3d_155], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf621 = torch.ops.aten.max_pool3d_with_indices.default(buf620, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf620
buf622 = buf621[0]
del buf621
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_268, max_pool3d_156], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf625 = torch.ops.aten.max_pool3d_with_indices.default(buf624, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf624
buf626 = buf625[0]
del buf625
buf628 = buf618; del buf618 # reuse
# Topologically Sorted Source Nodes: [p1_44, p3_44, min_89, p2_44, min_90], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf628, buf626, buf622, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_44, p3_44, min_89, p2_44, min_90, img1_22], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf629 = torch.ops.aten.max_pool3d_with_indices.default(buf628, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf630 = buf629[0]
del buf629
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_270, max_pool3d_158], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf633 = torch.ops.aten.max_pool3d_with_indices.default(buf632, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf634 = buf633[0]
del buf633
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_272, max_pool3d_159], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf637 = torch.ops.aten.max_pool3d_with_indices.default(buf636, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf638 = buf637[0]
del buf637
# Topologically Sorted Source Nodes: [p1_43, p3_43, min_87, p2_43, img_21, neg_274, max_pool3d_160], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf641 = torch.ops.aten.max_pool3d_with_indices.default(buf640, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf642 = buf641[0]
del buf641
buf644 = buf640; del buf640 # reuse
buf648 = buf636; del buf636 # reuse
buf652 = buf632; del buf632 # reuse
buf660 = buf628; del buf628 # reuse
buf664 = buf626; del buf626 # reuse
buf668 = buf622; del buf622 # reuse
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_276, neg_278, neg_280, neg_282, neg_284, neg_286], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf634, buf642, buf638, buf644, buf648, buf652, buf660, buf664, buf668, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_276, max_pool3d_161], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf645 = torch.ops.aten.max_pool3d_with_indices.default(buf644, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf644
buf646 = buf645[0]
del buf645
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_278, max_pool3d_162], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf649 = torch.ops.aten.max_pool3d_with_indices.default(buf648, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf648
buf650 = buf649[0]
del buf649
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_280, max_pool3d_163], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf653 = torch.ops.aten.max_pool3d_with_indices.default(buf652, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf652
buf654 = buf653[0]
del buf653
buf656 = buf646; del buf646 # reuse
# Topologically Sorted Source Nodes: [p1_46, p3_46, min_93, p2_46, min_94], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf656, buf654, buf650, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_46, p3_46, min_93, p2_46, min_94, img1_23], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf657 = torch.ops.aten.max_pool3d_with_indices.default(buf656, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf658 = buf657[0]
del buf657
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_282, max_pool3d_165], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf661 = torch.ops.aten.max_pool3d_with_indices.default(buf660, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf662 = buf661[0]
del buf661
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_284, max_pool3d_166], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf665 = torch.ops.aten.max_pool3d_with_indices.default(buf664, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf666 = buf665[0]
del buf665
# Topologically Sorted Source Nodes: [p1_45, p3_45, min_91, p2_45, img_22, neg_286, max_pool3d_167], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf669 = torch.ops.aten.max_pool3d_with_indices.default(buf668, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf670 = buf669[0]
del buf669
buf672 = buf668; del buf668 # reuse
buf676 = buf664; del buf664 # reuse
buf680 = buf660; del buf660 # reuse
buf688 = buf656; del buf656 # reuse
buf692 = buf654; del buf654 # reuse
buf696 = buf650; del buf650 # reuse
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_288, neg_290, neg_292, neg_294, neg_296, neg_298], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf662, buf670, buf666, buf672, buf676, buf680, buf688, buf692, buf696, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_288, max_pool3d_168], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf673 = torch.ops.aten.max_pool3d_with_indices.default(buf672, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf672
buf674 = buf673[0]
del buf673
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_290, max_pool3d_169], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf677 = torch.ops.aten.max_pool3d_with_indices.default(buf676, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf676
buf678 = buf677[0]
del buf677
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_292, max_pool3d_170], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf681 = torch.ops.aten.max_pool3d_with_indices.default(buf680, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf680
buf682 = buf681[0]
del buf681
buf684 = buf674; del buf674 # reuse
# Topologically Sorted Source Nodes: [p1_48, p3_48, min_97, p2_48, min_98], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf684, buf682, buf678, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_48, p3_48, min_97, p2_48, min_98, img1_24], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf685 = torch.ops.aten.max_pool3d_with_indices.default(buf684, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf686 = buf685[0]
del buf685
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_294, max_pool3d_172], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf689 = torch.ops.aten.max_pool3d_with_indices.default(buf688, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf690 = buf689[0]
del buf689
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_296, max_pool3d_173], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf693 = torch.ops.aten.max_pool3d_with_indices.default(buf692, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf694 = buf693[0]
del buf693
# Topologically Sorted Source Nodes: [p1_47, p3_47, min_95, p2_47, img_23, neg_298, max_pool3d_174], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf697 = torch.ops.aten.max_pool3d_with_indices.default(buf696, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf698 = buf697[0]
del buf697
buf700 = buf696; del buf696 # reuse
buf704 = buf692; del buf692 # reuse
buf708 = buf688; del buf688 # reuse
buf716 = buf684; del buf684 # reuse
buf720 = buf682; del buf682 # reuse
buf724 = buf678; del buf678 # reuse
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_300, neg_302, neg_304, neg_306, neg_308, neg_310], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf690, buf698, buf694, buf700, buf704, buf708, buf716, buf720, buf724, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_300, max_pool3d_175], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf701 = torch.ops.aten.max_pool3d_with_indices.default(buf700, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf700
buf702 = buf701[0]
del buf701
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_302, max_pool3d_176], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf705 = torch.ops.aten.max_pool3d_with_indices.default(buf704, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf704
buf706 = buf705[0]
del buf705
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_304, max_pool3d_177], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf709 = torch.ops.aten.max_pool3d_with_indices.default(buf708, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf708
buf710 = buf709[0]
del buf709
buf712 = buf702; del buf702 # reuse
# Topologically Sorted Source Nodes: [p1_50, p3_50, min_101, p2_50, min_102], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf712, buf710, buf706, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_50, p3_50, min_101, p2_50, min_102, img1_25], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf713 = torch.ops.aten.max_pool3d_with_indices.default(buf712, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf714 = buf713[0]
del buf713
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_306, max_pool3d_179], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf717 = torch.ops.aten.max_pool3d_with_indices.default(buf716, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf718 = buf717[0]
del buf717
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_308, max_pool3d_180], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf721 = torch.ops.aten.max_pool3d_with_indices.default(buf720, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf722 = buf721[0]
del buf721
# Topologically Sorted Source Nodes: [p1_49, p3_49, min_99, p2_49, img_24, neg_310, max_pool3d_181], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf725 = torch.ops.aten.max_pool3d_with_indices.default(buf724, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf726 = buf725[0]
del buf725
buf728 = buf724; del buf724 # reuse
buf732 = buf720; del buf720 # reuse
buf736 = buf716; del buf716 # reuse
buf744 = buf712; del buf712 # reuse
buf748 = buf710; del buf710 # reuse
buf752 = buf706; del buf706 # reuse
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_312, neg_314, neg_316, neg_318, neg_320, neg_322], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf718, buf726, buf722, buf728, buf732, buf736, buf744, buf748, buf752, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_312, max_pool3d_182], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf729 = torch.ops.aten.max_pool3d_with_indices.default(buf728, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf728
buf730 = buf729[0]
del buf729
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_314, max_pool3d_183], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf733 = torch.ops.aten.max_pool3d_with_indices.default(buf732, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf732
buf734 = buf733[0]
del buf733
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_316, max_pool3d_184], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf737 = torch.ops.aten.max_pool3d_with_indices.default(buf736, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf736
buf738 = buf737[0]
del buf737
buf740 = buf730; del buf730 # reuse
# Topologically Sorted Source Nodes: [p1_52, p3_52, min_105, p2_52, min_106], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf740, buf738, buf734, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_52, p3_52, min_105, p2_52, min_106, img1_26], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf741 = torch.ops.aten.max_pool3d_with_indices.default(buf740, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf742 = buf741[0]
del buf741
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_318, max_pool3d_186], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf745 = torch.ops.aten.max_pool3d_with_indices.default(buf744, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf746 = buf745[0]
del buf745
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_320, max_pool3d_187], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf749 = torch.ops.aten.max_pool3d_with_indices.default(buf748, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf750 = buf749[0]
del buf749
# Topologically Sorted Source Nodes: [p1_51, p3_51, min_103, p2_51, img_25, neg_322, max_pool3d_188], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf753 = torch.ops.aten.max_pool3d_with_indices.default(buf752, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf754 = buf753[0]
del buf753
buf756 = buf752; del buf752 # reuse
buf760 = buf748; del buf748 # reuse
buf764 = buf744; del buf744 # reuse
buf772 = buf740; del buf740 # reuse
buf776 = buf738; del buf738 # reuse
buf780 = buf734; del buf734 # reuse
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_324, neg_326, neg_328, neg_330, neg_332, neg_334], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf746, buf754, buf750, buf756, buf760, buf764, buf772, buf776, buf780, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_324, max_pool3d_189], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf757 = torch.ops.aten.max_pool3d_with_indices.default(buf756, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf756
buf758 = buf757[0]
del buf757
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_326, max_pool3d_190], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf761 = torch.ops.aten.max_pool3d_with_indices.default(buf760, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf760
buf762 = buf761[0]
del buf761
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_328, max_pool3d_191], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf765 = torch.ops.aten.max_pool3d_with_indices.default(buf764, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf764
buf766 = buf765[0]
del buf765
buf768 = buf758; del buf758 # reuse
# Topologically Sorted Source Nodes: [p1_54, p3_54, min_109, p2_54, min_110], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf768, buf766, buf762, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_54, p3_54, min_109, p2_54, min_110, img1_27], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf769 = torch.ops.aten.max_pool3d_with_indices.default(buf768, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf770 = buf769[0]
del buf769
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_330, max_pool3d_193], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf773 = torch.ops.aten.max_pool3d_with_indices.default(buf772, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf774 = buf773[0]
del buf773
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_332, max_pool3d_194], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf777 = torch.ops.aten.max_pool3d_with_indices.default(buf776, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf778 = buf777[0]
del buf777
# Topologically Sorted Source Nodes: [p1_53, p3_53, min_107, p2_53, img_26, neg_334, max_pool3d_195], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf781 = torch.ops.aten.max_pool3d_with_indices.default(buf780, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf782 = buf781[0]
del buf781
buf784 = buf780; del buf780 # reuse
buf788 = buf776; del buf776 # reuse
buf792 = buf772; del buf772 # reuse
buf800 = buf768; del buf768 # reuse
buf804 = buf766; del buf766 # reuse
buf808 = buf762; del buf762 # reuse
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_336, neg_338, neg_340, neg_342, neg_344, neg_346], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf774, buf782, buf778, buf784, buf788, buf792, buf800, buf804, buf808, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_336, max_pool3d_196], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf785 = torch.ops.aten.max_pool3d_with_indices.default(buf784, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf784
buf786 = buf785[0]
del buf785
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_338, max_pool3d_197], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf789 = torch.ops.aten.max_pool3d_with_indices.default(buf788, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf788
buf790 = buf789[0]
del buf789
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_340, max_pool3d_198], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf793 = torch.ops.aten.max_pool3d_with_indices.default(buf792, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf792
buf794 = buf793[0]
del buf793
buf796 = buf786; del buf786 # reuse
# Topologically Sorted Source Nodes: [p1_56, p3_56, min_113, p2_56, min_114], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf796, buf794, buf790, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_56, p3_56, min_113, p2_56, min_114, img1_28], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf797 = torch.ops.aten.max_pool3d_with_indices.default(buf796, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf798 = buf797[0]
del buf797
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_342, max_pool3d_200], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf801 = torch.ops.aten.max_pool3d_with_indices.default(buf800, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf802 = buf801[0]
del buf801
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_344, max_pool3d_201], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf805 = torch.ops.aten.max_pool3d_with_indices.default(buf804, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf806 = buf805[0]
del buf805
# Topologically Sorted Source Nodes: [p1_55, p3_55, min_111, p2_55, img_27, neg_346, max_pool3d_202], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf809 = torch.ops.aten.max_pool3d_with_indices.default(buf808, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf810 = buf809[0]
del buf809
buf812 = buf808; del buf808 # reuse
buf816 = buf804; del buf804 # reuse
buf820 = buf800; del buf800 # reuse
buf828 = buf796; del buf796 # reuse
buf832 = buf794; del buf794 # reuse
buf836 = buf790; del buf790 # reuse
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_348, neg_350, neg_352, neg_354, neg_356, neg_358], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf802, buf810, buf806, buf812, buf816, buf820, buf828, buf832, buf836, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_348, max_pool3d_203], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf813 = torch.ops.aten.max_pool3d_with_indices.default(buf812, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf812
buf814 = buf813[0]
del buf813
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_350, max_pool3d_204], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf817 = torch.ops.aten.max_pool3d_with_indices.default(buf816, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf816
buf818 = buf817[0]
del buf817
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_352, max_pool3d_205], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf821 = torch.ops.aten.max_pool3d_with_indices.default(buf820, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf820
buf822 = buf821[0]
del buf821
buf824 = buf814; del buf814 # reuse
# Topologically Sorted Source Nodes: [p1_58, p3_58, min_117, p2_58, min_118], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf824, buf822, buf818, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_58, p3_58, min_117, p2_58, min_118, img1_29], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf825 = torch.ops.aten.max_pool3d_with_indices.default(buf824, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf826 = buf825[0]
del buf825
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_354, max_pool3d_207], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf829 = torch.ops.aten.max_pool3d_with_indices.default(buf828, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf830 = buf829[0]
del buf829
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_356, max_pool3d_208], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf833 = torch.ops.aten.max_pool3d_with_indices.default(buf832, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf834 = buf833[0]
del buf833
# Topologically Sorted Source Nodes: [p1_57, p3_57, min_115, p2_57, img_28, neg_358, max_pool3d_209], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf837 = torch.ops.aten.max_pool3d_with_indices.default(buf836, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf838 = buf837[0]
del buf837
buf840 = buf836; del buf836 # reuse
buf844 = buf832; del buf832 # reuse
buf848 = buf828; del buf828 # reuse
buf856 = buf824; del buf824 # reuse
buf860 = buf822; del buf822 # reuse
buf864 = buf818; del buf818 # reuse
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_360, neg_362, neg_364, neg_366, neg_368, neg_370], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf830, buf838, buf834, buf840, buf844, buf848, buf856, buf860, buf864, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_360, max_pool3d_210], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf841 = torch.ops.aten.max_pool3d_with_indices.default(buf840, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf840
buf842 = buf841[0]
del buf841
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_362, max_pool3d_211], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf845 = torch.ops.aten.max_pool3d_with_indices.default(buf844, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf844
buf846 = buf845[0]
del buf845
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_364, max_pool3d_212], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf849 = torch.ops.aten.max_pool3d_with_indices.default(buf848, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf848
buf850 = buf849[0]
del buf849
buf852 = buf842; del buf842 # reuse
# Topologically Sorted Source Nodes: [p1_60, p3_60, min_121, p2_60, min_122], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf852, buf850, buf846, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_60, p3_60, min_121, p2_60, min_122, img1_30], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf853 = torch.ops.aten.max_pool3d_with_indices.default(buf852, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf854 = buf853[0]
del buf853
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_366, max_pool3d_214], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf857 = torch.ops.aten.max_pool3d_with_indices.default(buf856, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf858 = buf857[0]
del buf857
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_368, max_pool3d_215], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf861 = torch.ops.aten.max_pool3d_with_indices.default(buf860, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf862 = buf861[0]
del buf861
# Topologically Sorted Source Nodes: [p1_59, p3_59, min_119, p2_59, img_29, neg_370, max_pool3d_216], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf865 = torch.ops.aten.max_pool3d_with_indices.default(buf864, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf866 = buf865[0]
del buf865
buf868 = buf864; del buf864 # reuse
buf872 = buf860; del buf860 # reuse
buf876 = buf856; del buf856 # reuse
buf884 = buf852; del buf852 # reuse
buf888 = buf850; del buf850 # reuse
buf892 = buf846; del buf846 # reuse
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_372, neg_374, neg_376, neg_378, neg_380, neg_382], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf858, buf866, buf862, buf868, buf872, buf876, buf884, buf888, buf892, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_372, max_pool3d_217], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf869 = torch.ops.aten.max_pool3d_with_indices.default(buf868, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf868
buf870 = buf869[0]
del buf869
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_374, max_pool3d_218], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf873 = torch.ops.aten.max_pool3d_with_indices.default(buf872, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf872
buf874 = buf873[0]
del buf873
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_376, max_pool3d_219], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf877 = torch.ops.aten.max_pool3d_with_indices.default(buf876, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf876
buf878 = buf877[0]
del buf877
buf880 = buf870; del buf870 # reuse
# Topologically Sorted Source Nodes: [p1_62, p3_62, min_125, p2_62, min_126], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf880, buf878, buf874, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_62, p3_62, min_125, p2_62, min_126, img1_31], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf881 = torch.ops.aten.max_pool3d_with_indices.default(buf880, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf882 = buf881[0]
del buf881
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_378, max_pool3d_221], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf885 = torch.ops.aten.max_pool3d_with_indices.default(buf884, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf886 = buf885[0]
del buf885
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_380, max_pool3d_222], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf889 = torch.ops.aten.max_pool3d_with_indices.default(buf888, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf890 = buf889[0]
del buf889
# Topologically Sorted Source Nodes: [p1_61, p3_61, min_123, p2_61, img_30, neg_382, max_pool3d_223], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf893 = torch.ops.aten.max_pool3d_with_indices.default(buf892, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf894 = buf893[0]
del buf893
buf896 = buf892; del buf892 # reuse
buf900 = buf888; del buf888 # reuse
buf904 = buf884; del buf884 # reuse
buf912 = buf880; del buf880 # reuse
buf916 = buf878; del buf878 # reuse
buf920 = buf874; del buf874 # reuse
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_384, neg_386, neg_388, neg_390, neg_392, neg_394], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf886, buf894, buf890, buf896, buf900, buf904, buf912, buf916, buf920, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_384, max_pool3d_224], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf897 = torch.ops.aten.max_pool3d_with_indices.default(buf896, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf896
buf898 = buf897[0]
del buf897
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_386, max_pool3d_225], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf901 = torch.ops.aten.max_pool3d_with_indices.default(buf900, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf900
buf902 = buf901[0]
del buf901
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_388, max_pool3d_226], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf905 = torch.ops.aten.max_pool3d_with_indices.default(buf904, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf904
buf906 = buf905[0]
del buf905
buf908 = buf898; del buf898 # reuse
# Topologically Sorted Source Nodes: [p1_64, p3_64, min_129, p2_64, min_130], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf908, buf906, buf902, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_64, p3_64, min_129, p2_64, min_130, img1_32], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf909 = torch.ops.aten.max_pool3d_with_indices.default(buf908, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf910 = buf909[0]
del buf909
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_390, max_pool3d_228], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf913 = torch.ops.aten.max_pool3d_with_indices.default(buf912, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf914 = buf913[0]
del buf913
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_392, max_pool3d_229], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf917 = torch.ops.aten.max_pool3d_with_indices.default(buf916, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf918 = buf917[0]
del buf917
# Topologically Sorted Source Nodes: [p1_63, p3_63, min_127, p2_63, img_31, neg_394, max_pool3d_230], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf921 = torch.ops.aten.max_pool3d_with_indices.default(buf920, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf922 = buf921[0]
del buf921
buf924 = buf920; del buf920 # reuse
buf928 = buf916; del buf916 # reuse
buf932 = buf912; del buf912 # reuse
buf940 = buf908; del buf908 # reuse
buf944 = buf906; del buf906 # reuse
buf948 = buf902; del buf902 # reuse
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_396, neg_398, neg_400, neg_402, neg_404, neg_406], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf914, buf922, buf918, buf924, buf928, buf932, buf940, buf944, buf948, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_396, max_pool3d_231], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf925 = torch.ops.aten.max_pool3d_with_indices.default(buf924, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf924
buf926 = buf925[0]
del buf925
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_398, max_pool3d_232], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf929 = torch.ops.aten.max_pool3d_with_indices.default(buf928, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf928
buf930 = buf929[0]
del buf929
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_400, max_pool3d_233], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf933 = torch.ops.aten.max_pool3d_with_indices.default(buf932, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf932
buf934 = buf933[0]
del buf933
buf936 = buf926; del buf926 # reuse
# Topologically Sorted Source Nodes: [p1_66, p3_66, min_133, p2_66, min_134], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf936, buf934, buf930, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_66, p3_66, min_133, p2_66, min_134, img1_33], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf937 = torch.ops.aten.max_pool3d_with_indices.default(buf936, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf938 = buf937[0]
del buf937
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_402, max_pool3d_235], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf941 = torch.ops.aten.max_pool3d_with_indices.default(buf940, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf942 = buf941[0]
del buf941
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_404, max_pool3d_236], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf945 = torch.ops.aten.max_pool3d_with_indices.default(buf944, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf946 = buf945[0]
del buf945
# Topologically Sorted Source Nodes: [p1_65, p3_65, min_131, p2_65, img_32, neg_406, max_pool3d_237], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf949 = torch.ops.aten.max_pool3d_with_indices.default(buf948, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf950 = buf949[0]
del buf949
buf952 = buf948; del buf948 # reuse
buf956 = buf944; del buf944 # reuse
buf960 = buf940; del buf940 # reuse
buf968 = buf936; del buf936 # reuse
buf972 = buf934; del buf934 # reuse
buf976 = buf930; del buf930 # reuse
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_408, neg_410, neg_412, neg_414, neg_416, neg_418], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf942, buf950, buf946, buf952, buf956, buf960, buf968, buf972, buf976, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_408, max_pool3d_238], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf953 = torch.ops.aten.max_pool3d_with_indices.default(buf952, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf952
buf954 = buf953[0]
del buf953
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_410, max_pool3d_239], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf957 = torch.ops.aten.max_pool3d_with_indices.default(buf956, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf956
buf958 = buf957[0]
del buf957
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_412, max_pool3d_240], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf961 = torch.ops.aten.max_pool3d_with_indices.default(buf960, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf960
buf962 = buf961[0]
del buf961
buf964 = buf954; del buf954 # reuse
# Topologically Sorted Source Nodes: [p1_68, p3_68, min_137, p2_68, min_138], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf964, buf962, buf958, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_68, p3_68, min_137, p2_68, min_138, img1_34], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf965 = torch.ops.aten.max_pool3d_with_indices.default(buf964, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf966 = buf965[0]
del buf965
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_414, max_pool3d_242], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf969 = torch.ops.aten.max_pool3d_with_indices.default(buf968, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf970 = buf969[0]
del buf969
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_416, max_pool3d_243], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf973 = torch.ops.aten.max_pool3d_with_indices.default(buf972, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf974 = buf973[0]
del buf973
# Topologically Sorted Source Nodes: [p1_67, p3_67, min_135, p2_67, img_33, neg_418, max_pool3d_244], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf977 = torch.ops.aten.max_pool3d_with_indices.default(buf976, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf978 = buf977[0]
del buf977
buf980 = buf976; del buf976 # reuse
buf984 = buf972; del buf972 # reuse
buf988 = buf968; del buf968 # reuse
buf996 = buf964; del buf964 # reuse
buf1000 = buf962; del buf962 # reuse
buf1004 = buf958; del buf958 # reuse
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_420, neg_422, neg_424, neg_426, neg_428, neg_430], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf970, buf978, buf974, buf980, buf984, buf988, buf996, buf1000, buf1004, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_420, max_pool3d_245], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf981 = torch.ops.aten.max_pool3d_with_indices.default(buf980, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf980
buf982 = buf981[0]
del buf981
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_422, max_pool3d_246], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf985 = torch.ops.aten.max_pool3d_with_indices.default(buf984, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf984
buf986 = buf985[0]
del buf985
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_424, max_pool3d_247], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf989 = torch.ops.aten.max_pool3d_with_indices.default(buf988, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf988
buf990 = buf989[0]
del buf989
buf992 = buf982; del buf982 # reuse
# Topologically Sorted Source Nodes: [p1_70, p3_70, min_141, p2_70, min_142], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf992, buf990, buf986, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_70, p3_70, min_141, p2_70, min_142, img1_35], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf993 = torch.ops.aten.max_pool3d_with_indices.default(buf992, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf994 = buf993[0]
del buf993
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_426, max_pool3d_249], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf997 = torch.ops.aten.max_pool3d_with_indices.default(buf996, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf998 = buf997[0]
del buf997
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_428, max_pool3d_250], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1001 = torch.ops.aten.max_pool3d_with_indices.default(buf1000, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1002 = buf1001[0]
del buf1001
# Topologically Sorted Source Nodes: [p1_69, p3_69, min_139, p2_69, img_34, neg_430, max_pool3d_251], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1005 = torch.ops.aten.max_pool3d_with_indices.default(buf1004, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1006 = buf1005[0]
del buf1005
buf1008 = buf1004; del buf1004 # reuse
buf1012 = buf1000; del buf1000 # reuse
buf1016 = buf996; del buf996 # reuse
buf1024 = buf992; del buf992 # reuse
buf1028 = buf990; del buf990 # reuse
buf1032 = buf986; del buf986 # reuse
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_432, neg_434, neg_436, neg_438, neg_440, neg_442], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf998, buf1006, buf1002, buf1008, buf1012, buf1016, buf1024, buf1028, buf1032, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_432, max_pool3d_252], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1009 = torch.ops.aten.max_pool3d_with_indices.default(buf1008, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1008
buf1010 = buf1009[0]
del buf1009
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_434, max_pool3d_253], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1013 = torch.ops.aten.max_pool3d_with_indices.default(buf1012, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1012
buf1014 = buf1013[0]
del buf1013
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_436, max_pool3d_254], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1017 = torch.ops.aten.max_pool3d_with_indices.default(buf1016, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1016
buf1018 = buf1017[0]
del buf1017
buf1020 = buf1010; del buf1010 # reuse
# Topologically Sorted Source Nodes: [p1_72, p3_72, min_145, p2_72, min_146], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1020, buf1018, buf1014, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_72, p3_72, min_145, p2_72, min_146, img1_36], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1021 = torch.ops.aten.max_pool3d_with_indices.default(buf1020, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1022 = buf1021[0]
del buf1021
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_438, max_pool3d_256], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1025 = torch.ops.aten.max_pool3d_with_indices.default(buf1024, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1026 = buf1025[0]
del buf1025
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_440, max_pool3d_257], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1029 = torch.ops.aten.max_pool3d_with_indices.default(buf1028, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1030 = buf1029[0]
del buf1029
# Topologically Sorted Source Nodes: [p1_71, p3_71, min_143, p2_71, img_35, neg_442, max_pool3d_258], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1033 = torch.ops.aten.max_pool3d_with_indices.default(buf1032, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1034 = buf1033[0]
del buf1033
buf1036 = buf1032; del buf1032 # reuse
buf1040 = buf1028; del buf1028 # reuse
buf1044 = buf1024; del buf1024 # reuse
buf1052 = buf1020; del buf1020 # reuse
buf1056 = buf1018; del buf1018 # reuse
buf1060 = buf1014; del buf1014 # reuse
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_444, neg_446, neg_448, neg_450, neg_452, neg_454], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1026, buf1034, buf1030, buf1036, buf1040, buf1044, buf1052, buf1056, buf1060, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_444, max_pool3d_259], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1037 = torch.ops.aten.max_pool3d_with_indices.default(buf1036, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1036
buf1038 = buf1037[0]
del buf1037
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_446, max_pool3d_260], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1041 = torch.ops.aten.max_pool3d_with_indices.default(buf1040, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1040
buf1042 = buf1041[0]
del buf1041
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_448, max_pool3d_261], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1045 = torch.ops.aten.max_pool3d_with_indices.default(buf1044, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1044
buf1046 = buf1045[0]
del buf1045
buf1048 = buf1038; del buf1038 # reuse
# Topologically Sorted Source Nodes: [p1_74, p3_74, min_149, p2_74, min_150], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1048, buf1046, buf1042, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_74, p3_74, min_149, p2_74, min_150, img1_37], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1049 = torch.ops.aten.max_pool3d_with_indices.default(buf1048, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1050 = buf1049[0]
del buf1049
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_450, max_pool3d_263], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1053 = torch.ops.aten.max_pool3d_with_indices.default(buf1052, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1054 = buf1053[0]
del buf1053
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_452, max_pool3d_264], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1057 = torch.ops.aten.max_pool3d_with_indices.default(buf1056, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1058 = buf1057[0]
del buf1057
# Topologically Sorted Source Nodes: [p1_73, p3_73, min_147, p2_73, img_36, neg_454, max_pool3d_265], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1061 = torch.ops.aten.max_pool3d_with_indices.default(buf1060, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1062 = buf1061[0]
del buf1061
buf1064 = buf1060; del buf1060 # reuse
buf1068 = buf1056; del buf1056 # reuse
buf1072 = buf1052; del buf1052 # reuse
buf1080 = buf1048; del buf1048 # reuse
buf1084 = buf1046; del buf1046 # reuse
buf1088 = buf1042; del buf1042 # reuse
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_456, neg_458, neg_460, neg_462, neg_464, neg_466], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1054, buf1062, buf1058, buf1064, buf1068, buf1072, buf1080, buf1084, buf1088, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_456, max_pool3d_266], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1065 = torch.ops.aten.max_pool3d_with_indices.default(buf1064, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1064
buf1066 = buf1065[0]
del buf1065
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_458, max_pool3d_267], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1069 = torch.ops.aten.max_pool3d_with_indices.default(buf1068, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1068
buf1070 = buf1069[0]
del buf1069
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_460, max_pool3d_268], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1073 = torch.ops.aten.max_pool3d_with_indices.default(buf1072, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1072
buf1074 = buf1073[0]
del buf1073
buf1076 = buf1066; del buf1066 # reuse
# Topologically Sorted Source Nodes: [p1_76, p3_76, min_153, p2_76, min_154], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1076, buf1074, buf1070, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_76, p3_76, min_153, p2_76, min_154, img1_38], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1077 = torch.ops.aten.max_pool3d_with_indices.default(buf1076, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1078 = buf1077[0]
del buf1077
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_462, max_pool3d_270], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1081 = torch.ops.aten.max_pool3d_with_indices.default(buf1080, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1082 = buf1081[0]
del buf1081
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_464, max_pool3d_271], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1085 = torch.ops.aten.max_pool3d_with_indices.default(buf1084, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1086 = buf1085[0]
del buf1085
# Topologically Sorted Source Nodes: [p1_75, p3_75, min_151, p2_75, img_37, neg_466, max_pool3d_272], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1089 = torch.ops.aten.max_pool3d_with_indices.default(buf1088, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1090 = buf1089[0]
del buf1089
buf1092 = buf1088; del buf1088 # reuse
buf1096 = buf1084; del buf1084 # reuse
buf1100 = buf1080; del buf1080 # reuse
buf1108 = buf1076; del buf1076 # reuse
buf1112 = buf1074; del buf1074 # reuse
buf1116 = buf1070; del buf1070 # reuse
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_468, neg_470, neg_472, neg_474, neg_476, neg_478], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1082, buf1090, buf1086, buf1092, buf1096, buf1100, buf1108, buf1112, buf1116, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_468, max_pool3d_273], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1093 = torch.ops.aten.max_pool3d_with_indices.default(buf1092, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1092
buf1094 = buf1093[0]
del buf1093
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_470, max_pool3d_274], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1097 = torch.ops.aten.max_pool3d_with_indices.default(buf1096, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1096
buf1098 = buf1097[0]
del buf1097
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_472, max_pool3d_275], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1101 = torch.ops.aten.max_pool3d_with_indices.default(buf1100, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1100
buf1102 = buf1101[0]
del buf1101
buf1104 = buf1094; del buf1094 # reuse
# Topologically Sorted Source Nodes: [p1_78, p3_78, min_157, p2_78, min_158], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1104, buf1102, buf1098, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_78, p3_78, min_157, p2_78, min_158, img1_39], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1105 = torch.ops.aten.max_pool3d_with_indices.default(buf1104, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1106 = buf1105[0]
del buf1105
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_474, max_pool3d_277], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1109 = torch.ops.aten.max_pool3d_with_indices.default(buf1108, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1110 = buf1109[0]
del buf1109
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_476, max_pool3d_278], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1113 = torch.ops.aten.max_pool3d_with_indices.default(buf1112, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1114 = buf1113[0]
del buf1113
# Topologically Sorted Source Nodes: [p1_77, p3_77, min_155, p2_77, img_38, neg_478, max_pool3d_279], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1117 = torch.ops.aten.max_pool3d_with_indices.default(buf1116, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1118 = buf1117[0]
del buf1117
buf1120 = buf1116; del buf1116 # reuse
buf1124 = buf1112; del buf1112 # reuse
buf1128 = buf1108; del buf1108 # reuse
buf1136 = buf1104; del buf1104 # reuse
buf1140 = buf1102; del buf1102 # reuse
buf1144 = buf1098; del buf1098 # reuse
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_480, neg_482, neg_484, neg_486, neg_488, neg_490], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1110, buf1118, buf1114, buf1120, buf1124, buf1128, buf1136, buf1140, buf1144, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_480, max_pool3d_280], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1121 = torch.ops.aten.max_pool3d_with_indices.default(buf1120, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1120
buf1122 = buf1121[0]
del buf1121
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_482, max_pool3d_281], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1125 = torch.ops.aten.max_pool3d_with_indices.default(buf1124, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1124
buf1126 = buf1125[0]
del buf1125
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_484, max_pool3d_282], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1129 = torch.ops.aten.max_pool3d_with_indices.default(buf1128, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1128
buf1130 = buf1129[0]
del buf1129
buf1132 = buf1122; del buf1122 # reuse
# Topologically Sorted Source Nodes: [p1_80, p3_80, min_161, p2_80, min_162], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1132, buf1130, buf1126, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_80, p3_80, min_161, p2_80, min_162, img1_40], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1133 = torch.ops.aten.max_pool3d_with_indices.default(buf1132, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1134 = buf1133[0]
del buf1133
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_486, max_pool3d_284], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1137 = torch.ops.aten.max_pool3d_with_indices.default(buf1136, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1138 = buf1137[0]
del buf1137
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_488, max_pool3d_285], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1141 = torch.ops.aten.max_pool3d_with_indices.default(buf1140, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1142 = buf1141[0]
del buf1141
# Topologically Sorted Source Nodes: [p1_79, p3_79, min_159, p2_79, img_39, neg_490, max_pool3d_286], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1145 = torch.ops.aten.max_pool3d_with_indices.default(buf1144, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1146 = buf1145[0]
del buf1145
buf1148 = buf1144; del buf1144 # reuse
buf1152 = buf1140; del buf1140 # reuse
buf1156 = buf1136; del buf1136 # reuse
buf1164 = buf1132; del buf1132 # reuse
buf1168 = buf1130; del buf1130 # reuse
buf1172 = buf1126; del buf1126 # reuse
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_492, neg_494, neg_496, neg_498, neg_500, neg_502], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1138, buf1146, buf1142, buf1148, buf1152, buf1156, buf1164, buf1168, buf1172, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_492, max_pool3d_287], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1149 = torch.ops.aten.max_pool3d_with_indices.default(buf1148, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1148
buf1150 = buf1149[0]
del buf1149
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_494, max_pool3d_288], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1153 = torch.ops.aten.max_pool3d_with_indices.default(buf1152, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1152
buf1154 = buf1153[0]
del buf1153
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_496, max_pool3d_289], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1157 = torch.ops.aten.max_pool3d_with_indices.default(buf1156, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1156
buf1158 = buf1157[0]
del buf1157
buf1160 = buf1150; del buf1150 # reuse
# Topologically Sorted Source Nodes: [p1_82, p3_82, min_165, p2_82, min_166], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1160, buf1158, buf1154, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_82, p3_82, min_165, p2_82, min_166, img1_41], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1161 = torch.ops.aten.max_pool3d_with_indices.default(buf1160, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1162 = buf1161[0]
del buf1161
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_498, max_pool3d_291], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1165 = torch.ops.aten.max_pool3d_with_indices.default(buf1164, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1166 = buf1165[0]
del buf1165
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_500, max_pool3d_292], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1169 = torch.ops.aten.max_pool3d_with_indices.default(buf1168, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1170 = buf1169[0]
del buf1169
# Topologically Sorted Source Nodes: [p1_81, p3_81, min_163, p2_81, img_40, neg_502, max_pool3d_293], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1173 = torch.ops.aten.max_pool3d_with_indices.default(buf1172, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1174 = buf1173[0]
del buf1173
buf1176 = buf1172; del buf1172 # reuse
buf1180 = buf1168; del buf1168 # reuse
buf1184 = buf1164; del buf1164 # reuse
buf1192 = buf1160; del buf1160 # reuse
buf1196 = buf1158; del buf1158 # reuse
buf1200 = buf1154; del buf1154 # reuse
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_504, neg_506, neg_508, neg_510, neg_512, neg_514], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1166, buf1174, buf1170, buf1176, buf1180, buf1184, buf1192, buf1196, buf1200, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_504, max_pool3d_294], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1177 = torch.ops.aten.max_pool3d_with_indices.default(buf1176, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1176
buf1178 = buf1177[0]
del buf1177
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_506, max_pool3d_295], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1181 = torch.ops.aten.max_pool3d_with_indices.default(buf1180, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1180
buf1182 = buf1181[0]
del buf1181
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_508, max_pool3d_296], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1185 = torch.ops.aten.max_pool3d_with_indices.default(buf1184, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1184
buf1186 = buf1185[0]
del buf1185
buf1188 = buf1178; del buf1178 # reuse
# Topologically Sorted Source Nodes: [p1_84, p3_84, min_169, p2_84, min_170], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1188, buf1186, buf1182, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_84, p3_84, min_169, p2_84, min_170, img1_42], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1189 = torch.ops.aten.max_pool3d_with_indices.default(buf1188, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1190 = buf1189[0]
del buf1189
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_510, max_pool3d_298], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1193 = torch.ops.aten.max_pool3d_with_indices.default(buf1192, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1194 = buf1193[0]
del buf1193
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_512, max_pool3d_299], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1197 = torch.ops.aten.max_pool3d_with_indices.default(buf1196, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1198 = buf1197[0]
del buf1197
# Topologically Sorted Source Nodes: [p1_83, p3_83, min_167, p2_83, img_41, neg_514, max_pool3d_300], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1201 = torch.ops.aten.max_pool3d_with_indices.default(buf1200, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1202 = buf1201[0]
del buf1201
buf1204 = buf1200; del buf1200 # reuse
buf1208 = buf1196; del buf1196 # reuse
buf1212 = buf1192; del buf1192 # reuse
buf1220 = buf1188; del buf1188 # reuse
buf1224 = buf1186; del buf1186 # reuse
buf1228 = buf1182; del buf1182 # reuse
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_516, neg_518, neg_520, neg_522, neg_524, neg_526], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1194, buf1202, buf1198, buf1204, buf1208, buf1212, buf1220, buf1224, buf1228, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_516, max_pool3d_301], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1205 = torch.ops.aten.max_pool3d_with_indices.default(buf1204, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1204
buf1206 = buf1205[0]
del buf1205
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_518, max_pool3d_302], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1209 = torch.ops.aten.max_pool3d_with_indices.default(buf1208, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1208
buf1210 = buf1209[0]
del buf1209
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_520, max_pool3d_303], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1213 = torch.ops.aten.max_pool3d_with_indices.default(buf1212, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1212
buf1214 = buf1213[0]
del buf1213
buf1216 = buf1206; del buf1206 # reuse
# Topologically Sorted Source Nodes: [p1_86, p3_86, min_173, p2_86, min_174], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1216, buf1214, buf1210, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_86, p3_86, min_173, p2_86, min_174, img1_43], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1217 = torch.ops.aten.max_pool3d_with_indices.default(buf1216, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1218 = buf1217[0]
del buf1217
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_522, max_pool3d_305], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1221 = torch.ops.aten.max_pool3d_with_indices.default(buf1220, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1222 = buf1221[0]
del buf1221
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_524, max_pool3d_306], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1225 = torch.ops.aten.max_pool3d_with_indices.default(buf1224, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1226 = buf1225[0]
del buf1225
# Topologically Sorted Source Nodes: [p1_85, p3_85, min_171, p2_85, img_42, neg_526, max_pool3d_307], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1229 = torch.ops.aten.max_pool3d_with_indices.default(buf1228, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1230 = buf1229[0]
del buf1229
buf1232 = buf1228; del buf1228 # reuse
buf1236 = buf1224; del buf1224 # reuse
buf1240 = buf1220; del buf1220 # reuse
buf1248 = buf1216; del buf1216 # reuse
buf1252 = buf1214; del buf1214 # reuse
buf1256 = buf1210; del buf1210 # reuse
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_528, neg_530, neg_532, neg_534, neg_536, neg_538], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1222, buf1230, buf1226, buf1232, buf1236, buf1240, buf1248, buf1252, buf1256, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_528, max_pool3d_308], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1233 = torch.ops.aten.max_pool3d_with_indices.default(buf1232, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1232
buf1234 = buf1233[0]
del buf1233
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_530, max_pool3d_309], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1237 = torch.ops.aten.max_pool3d_with_indices.default(buf1236, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1236
buf1238 = buf1237[0]
del buf1237
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_532, max_pool3d_310], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1241 = torch.ops.aten.max_pool3d_with_indices.default(buf1240, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1240
buf1242 = buf1241[0]
del buf1241
buf1244 = buf1234; del buf1234 # reuse
# Topologically Sorted Source Nodes: [p1_88, p3_88, min_177, p2_88, min_178], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1244, buf1242, buf1238, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_88, p3_88, min_177, p2_88, min_178, img1_44], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1245 = torch.ops.aten.max_pool3d_with_indices.default(buf1244, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1246 = buf1245[0]
del buf1245
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_534, max_pool3d_312], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1249 = torch.ops.aten.max_pool3d_with_indices.default(buf1248, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1250 = buf1249[0]
del buf1249
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_536, max_pool3d_313], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1253 = torch.ops.aten.max_pool3d_with_indices.default(buf1252, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1254 = buf1253[0]
del buf1253
# Topologically Sorted Source Nodes: [p1_87, p3_87, min_175, p2_87, img_43, neg_538, max_pool3d_314], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1257 = torch.ops.aten.max_pool3d_with_indices.default(buf1256, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1258 = buf1257[0]
del buf1257
buf1260 = buf1256; del buf1256 # reuse
buf1264 = buf1252; del buf1252 # reuse
buf1268 = buf1248; del buf1248 # reuse
buf1276 = buf1244; del buf1244 # reuse
buf1280 = buf1242; del buf1242 # reuse
buf1284 = buf1238; del buf1238 # reuse
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_540, neg_542, neg_544, neg_546, neg_548, neg_550], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1250, buf1258, buf1254, buf1260, buf1264, buf1268, buf1276, buf1280, buf1284, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_540, max_pool3d_315], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1261 = torch.ops.aten.max_pool3d_with_indices.default(buf1260, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1260
buf1262 = buf1261[0]
del buf1261
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_542, max_pool3d_316], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1265 = torch.ops.aten.max_pool3d_with_indices.default(buf1264, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1264
buf1266 = buf1265[0]
del buf1265
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_544, max_pool3d_317], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1269 = torch.ops.aten.max_pool3d_with_indices.default(buf1268, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1268
buf1270 = buf1269[0]
del buf1269
buf1272 = buf1262; del buf1262 # reuse
# Topologically Sorted Source Nodes: [p1_90, p3_90, min_181, p2_90, min_182], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1272, buf1270, buf1266, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_90, p3_90, min_181, p2_90, min_182, img1_45], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1273 = torch.ops.aten.max_pool3d_with_indices.default(buf1272, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1274 = buf1273[0]
del buf1273
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_546, max_pool3d_319], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1277 = torch.ops.aten.max_pool3d_with_indices.default(buf1276, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1278 = buf1277[0]
del buf1277
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_548, max_pool3d_320], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1281 = torch.ops.aten.max_pool3d_with_indices.default(buf1280, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1282 = buf1281[0]
del buf1281
# Topologically Sorted Source Nodes: [p1_89, p3_89, min_179, p2_89, img_44, neg_550, max_pool3d_321], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1285 = torch.ops.aten.max_pool3d_with_indices.default(buf1284, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1286 = buf1285[0]
del buf1285
buf1288 = buf1284; del buf1284 # reuse
buf1292 = buf1280; del buf1280 # reuse
buf1296 = buf1276; del buf1276 # reuse
buf1304 = buf1272; del buf1272 # reuse
buf1308 = buf1270; del buf1270 # reuse
buf1312 = buf1266; del buf1266 # reuse
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_552, neg_554, neg_556, neg_558, neg_560, neg_562], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1278, buf1286, buf1282, buf1288, buf1292, buf1296, buf1304, buf1308, buf1312, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_552, max_pool3d_322], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1289 = torch.ops.aten.max_pool3d_with_indices.default(buf1288, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1288
buf1290 = buf1289[0]
del buf1289
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_554, max_pool3d_323], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1293 = torch.ops.aten.max_pool3d_with_indices.default(buf1292, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1292
buf1294 = buf1293[0]
del buf1293
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_556, max_pool3d_324], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1297 = torch.ops.aten.max_pool3d_with_indices.default(buf1296, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1296
buf1298 = buf1297[0]
del buf1297
buf1300 = buf1290; del buf1290 # reuse
# Topologically Sorted Source Nodes: [p1_92, p3_92, min_185, p2_92, min_186], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1300, buf1298, buf1294, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_92, p3_92, min_185, p2_92, min_186, img1_46], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1301 = torch.ops.aten.max_pool3d_with_indices.default(buf1300, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1302 = buf1301[0]
del buf1301
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_558, max_pool3d_326], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1305 = torch.ops.aten.max_pool3d_with_indices.default(buf1304, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1306 = buf1305[0]
del buf1305
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_560, max_pool3d_327], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1309 = torch.ops.aten.max_pool3d_with_indices.default(buf1308, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1310 = buf1309[0]
del buf1309
# Topologically Sorted Source Nodes: [p1_91, p3_91, min_183, p2_91, img_45, neg_562, max_pool3d_328], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1313 = torch.ops.aten.max_pool3d_with_indices.default(buf1312, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1314 = buf1313[0]
del buf1313
buf1316 = buf1312; del buf1312 # reuse
buf1320 = buf1308; del buf1308 # reuse
buf1324 = buf1304; del buf1304 # reuse
buf1332 = buf1300; del buf1300 # reuse
buf1336 = buf1298; del buf1298 # reuse
buf1340 = buf1294; del buf1294 # reuse
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_564, neg_566, neg_568, neg_570, neg_572, neg_574], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1306, buf1314, buf1310, buf1316, buf1320, buf1324, buf1332, buf1336, buf1340, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_564, max_pool3d_329], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1317 = torch.ops.aten.max_pool3d_with_indices.default(buf1316, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1316
buf1318 = buf1317[0]
del buf1317
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_566, max_pool3d_330], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1321 = torch.ops.aten.max_pool3d_with_indices.default(buf1320, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1320
buf1322 = buf1321[0]
del buf1321
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_568, max_pool3d_331], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1325 = torch.ops.aten.max_pool3d_with_indices.default(buf1324, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1324
buf1326 = buf1325[0]
del buf1325
buf1328 = buf1318; del buf1318 # reuse
# Topologically Sorted Source Nodes: [p1_94, p3_94, min_189, p2_94, min_190], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1328, buf1326, buf1322, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_94, p3_94, min_189, p2_94, min_190, img1_47], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1329 = torch.ops.aten.max_pool3d_with_indices.default(buf1328, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1330 = buf1329[0]
del buf1329
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_570, max_pool3d_333], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1333 = torch.ops.aten.max_pool3d_with_indices.default(buf1332, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1334 = buf1333[0]
del buf1333
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_572, max_pool3d_334], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1337 = torch.ops.aten.max_pool3d_with_indices.default(buf1336, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1338 = buf1337[0]
del buf1337
# Topologically Sorted Source Nodes: [p1_93, p3_93, min_187, p2_93, img_46, neg_574, max_pool3d_335], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1341 = torch.ops.aten.max_pool3d_with_indices.default(buf1340, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1342 = buf1341[0]
del buf1341
buf1344 = buf1340; del buf1340 # reuse
buf1348 = buf1336; del buf1336 # reuse
buf1352 = buf1332; del buf1332 # reuse
buf1360 = buf1328; del buf1328 # reuse
buf1364 = buf1326; del buf1326 # reuse
buf1368 = buf1322; del buf1322 # reuse
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_576, neg_578, neg_580, neg_582, neg_584, neg_586], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1334, buf1342, buf1338, buf1344, buf1348, buf1352, buf1360, buf1364, buf1368, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_576, max_pool3d_336], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1345 = torch.ops.aten.max_pool3d_with_indices.default(buf1344, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1344
buf1346 = buf1345[0]
del buf1345
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_578, max_pool3d_337], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1349 = torch.ops.aten.max_pool3d_with_indices.default(buf1348, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1348
buf1350 = buf1349[0]
del buf1349
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_580, max_pool3d_338], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1353 = torch.ops.aten.max_pool3d_with_indices.default(buf1352, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1352
buf1354 = buf1353[0]
del buf1353
buf1356 = buf1346; del buf1346 # reuse
# Topologically Sorted Source Nodes: [p1_96, p3_96, min_193, p2_96, min_194], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1356, buf1354, buf1350, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_96, p3_96, min_193, p2_96, min_194, img1_48], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1357 = torch.ops.aten.max_pool3d_with_indices.default(buf1356, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1358 = buf1357[0]
del buf1357
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_582, max_pool3d_340], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1361 = torch.ops.aten.max_pool3d_with_indices.default(buf1360, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1362 = buf1361[0]
del buf1361
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_584, max_pool3d_341], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1365 = torch.ops.aten.max_pool3d_with_indices.default(buf1364, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1366 = buf1365[0]
del buf1365
# Topologically Sorted Source Nodes: [p1_95, p3_95, min_191, p2_95, img_47, neg_586, max_pool3d_342], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1369 = torch.ops.aten.max_pool3d_with_indices.default(buf1368, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1370 = buf1369[0]
del buf1369
buf1372 = buf1368; del buf1368 # reuse
buf1376 = buf1364; del buf1364 # reuse
buf1380 = buf1360; del buf1360 # reuse
buf1388 = buf1356; del buf1356 # reuse
buf1392 = buf1354; del buf1354 # reuse
buf1396 = buf1350; del buf1350 # reuse
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_588, neg_590, neg_592, neg_594, neg_596, neg_598], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1362, buf1370, buf1366, buf1372, buf1376, buf1380, buf1388, buf1392, buf1396, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_588, max_pool3d_343], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1373 = torch.ops.aten.max_pool3d_with_indices.default(buf1372, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1372
buf1374 = buf1373[0]
del buf1373
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_590, max_pool3d_344], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1377 = torch.ops.aten.max_pool3d_with_indices.default(buf1376, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1376
buf1378 = buf1377[0]
del buf1377
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_592, max_pool3d_345], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1381 = torch.ops.aten.max_pool3d_with_indices.default(buf1380, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1380
buf1382 = buf1381[0]
del buf1381
buf1384 = buf1374; del buf1374 # reuse
# Topologically Sorted Source Nodes: [p1_98, p3_98, min_197, p2_98, min_198], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1384, buf1382, buf1378, 256, grid=grid(256), stream=stream0)
del buf1378
del buf1382
# Topologically Sorted Source Nodes: [p1_98, p3_98, min_197, p2_98, min_198, img1_49], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1385 = torch.ops.aten.max_pool3d_with_indices.default(buf1384, [3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf1384
buf1386 = buf1385[0]
del buf1385
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_594, max_pool3d_347], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1389 = torch.ops.aten.max_pool3d_with_indices.default(buf1388, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1390 = buf1389[0]
del buf1389
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_596, max_pool3d_348], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1393 = torch.ops.aten.max_pool3d_with_indices.default(buf1392, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1394 = buf1393[0]
del buf1393
# Topologically Sorted Source Nodes: [p1_97, p3_97, min_195, p2_97, img_48, neg_598, max_pool3d_349], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1397 = torch.ops.aten.max_pool3d_with_indices.default(buf1396, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1398 = buf1397[0]
del buf1397
buf1400 = buf1396; del buf1396 # reuse
buf1404 = buf1392; del buf1392 # reuse
buf1408 = buf1388; del buf1388 # reuse
# Topologically Sorted Source Nodes: [p1_99, p3_99, min_199, p2_99, img_49, neg_600, neg_602, neg_604], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_4.run(buf1390, buf1398, buf1394, buf1400, buf1404, buf1408, 256, grid=grid(256), stream=stream0)
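    # triton_poi_fused_minimum_neg_4 is the tail variant of the fan-out kernel:
    # this is the last erosion in the current unrolled chain, so the negated
    # minimum only needs three copies (for the pools feeding the final opening,
    # img1_50) rather than six; no further img_N erosion follows it.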
# Topologically Sorted Source Nodes: [p1_99, p3_99, min_199, p2_99, img_49, neg_600, max_pool3d_350], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1401 = torch.ops.aten.max_pool3d_with_indices.default(buf1400, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1402 = buf1401[0]
del buf1401
# Topologically Sorted Source Nodes: [p1_99, p3_99, min_199, p2_99, img_49, neg_602, max_pool3d_351], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1405 = torch.ops.aten.max_pool3d_with_indices.default(buf1404, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1406 = buf1405[0]
del buf1405
# Topologically Sorted Source Nodes: [p1_99, p3_99, min_199, p2_99, img_49, neg_604, max_pool3d_352], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1409 = torch.ops.aten.max_pool3d_with_indices.default(buf1408, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1410 = buf1409[0]
del buf1409
buf1412 = buf1402; del buf1402 # reuse
# Topologically Sorted Source Nodes: [p1_100, p3_100, min_201, p2_100, min_202], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1412, buf1410, buf1406, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_100, p3_100, min_201, p2_100, min_202, img1_50], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1413 = torch.ops.aten.max_pool3d_with_indices.default(buf1412, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1414 = buf1413[0]
del buf1413
buf1416 = buf1412; del buf1412 # reuse
buf1420 = buf1410; del buf1410 # reuse
buf1424 = buf1406; del buf1406 # reuse
buf1432 = buf1408; del buf1408 # reuse
buf1436 = buf1404; del buf1404 # reuse
buf1440 = buf1400; del buf1400 # reuse
# Topologically Sorted Source Nodes: [neg_606, neg_608, neg_610, neg_612, neg_614, neg_616], Original ATen: [aten.neg]
triton_poi_fused_neg_0.run(arg1_1, buf1416, buf1420, buf1424, buf1432, buf1436, buf1440, 256, grid=grid(256), stream=stream0)
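    # A second, independent cascade starts here: triton_poi_fused_neg_0 seeds
    # six scratch buffers directly with -arg1_1, i.e. the same erosion/dilation
    # pipeline now runs on the graph input arg1_1 rather than on the previous
    # chain's running image.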
# Topologically Sorted Source Nodes: [neg_606, max_pool3d_354], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1417 = torch.ops.aten.max_pool3d_with_indices.default(buf1416, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1416
buf1418 = buf1417[0]
del buf1417
# Topologically Sorted Source Nodes: [neg_608, max_pool3d_355], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1421 = torch.ops.aten.max_pool3d_with_indices.default(buf1420, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1420
buf1422 = buf1421[0]
del buf1421
# Topologically Sorted Source Nodes: [neg_610, max_pool3d_356], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1425 = torch.ops.aten.max_pool3d_with_indices.default(buf1424, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1424
buf1426 = buf1425[0]
del buf1425
buf1428 = buf1418; del buf1418 # reuse
# Topologically Sorted Source Nodes: [p1_101, p3_101, min_203, p2_101, min_204], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1428, buf1426, buf1422, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_101, p3_101, min_203, p2_101, min_204, img1_51], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1429 = torch.ops.aten.max_pool3d_with_indices.default(buf1428, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1430 = buf1429[0]
del buf1429
# Topologically Sorted Source Nodes: [neg_612, max_pool3d_358], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1433 = torch.ops.aten.max_pool3d_with_indices.default(buf1432, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1434 = buf1433[0]
del buf1433
# Topologically Sorted Source Nodes: [neg_614, max_pool3d_359], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1437 = torch.ops.aten.max_pool3d_with_indices.default(buf1436, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1438 = buf1437[0]
del buf1437
# Topologically Sorted Source Nodes: [neg_616, max_pool3d_360], Original ATen: [aten.neg, aten.max_pool3d_with_indices]
buf1441 = torch.ops.aten.max_pool3d_with_indices.default(buf1440, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1442 = buf1441[0]
del buf1441
buf1444 = buf1440; del buf1440 # reuse
buf1448 = buf1436; del buf1436 # reuse
buf1452 = buf1432; del buf1432 # reuse
buf1460 = buf1428; del buf1428 # reuse
buf1464 = buf1426; del buf1426 # reuse
buf1468 = buf1422; del buf1422 # reuse
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_618, neg_620, neg_622, neg_624, neg_626, neg_628], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1434, buf1442, buf1438, buf1444, buf1448, buf1452, buf1460, buf1464, buf1468, 256, grid=grid(256), stream=stream0)
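    # NOTE (editorial annotation, not Inductor output): triton_poi_fused_minimum_neg_2
    # seems to fuse the end of one stage with the start of the next: it reduces the
    # three axis-wise pool results (buf1434, buf1442, buf1438) with aten.minimum and
    # writes the re-negated result into six fresh buffers (buf1444 .. buf1468) so the
    # following round of max_pool3d calls can run without extra copy kernels.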
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_618, max_pool3d_361], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1445 = torch.ops.aten.max_pool3d_with_indices.default(buf1444, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1444
buf1446 = buf1445[0]
del buf1445
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_620, max_pool3d_362], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1449 = torch.ops.aten.max_pool3d_with_indices.default(buf1448, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1448
buf1450 = buf1449[0]
del buf1449
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_622, max_pool3d_363], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1453 = torch.ops.aten.max_pool3d_with_indices.default(buf1452, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1452
buf1454 = buf1453[0]
del buf1453
buf1456 = buf1446; del buf1446 # reuse
# Topologically Sorted Source Nodes: [p1_103, p3_103, min_207, p2_103, min_208], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1456, buf1454, buf1450, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_103, p3_103, min_207, p2_103, min_208, img1_52], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1457 = torch.ops.aten.max_pool3d_with_indices.default(buf1456, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1458 = buf1457[0]
del buf1457
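    # NOTE (editorial annotation, not Inductor output): from here to the end of the
    # section the same erosion / minimum / dilation stage repeats with identical
    # shapes and kernel sizes, only the buffer numbers and node suffixes advancing
    # (img_49 through img_70, img1_50 through img1_71). This looks like a Python-level
    # loop in the source model that was unrolled during tracing, so each iteration is
    # emitted as straight-line code.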
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_624, max_pool3d_365], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1461 = torch.ops.aten.max_pool3d_with_indices.default(buf1460, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1462 = buf1461[0]
del buf1461
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_626, max_pool3d_366], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1465 = torch.ops.aten.max_pool3d_with_indices.default(buf1464, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1466 = buf1465[0]
del buf1465
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, neg_628, max_pool3d_367], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1469 = torch.ops.aten.max_pool3d_with_indices.default(buf1468, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1470 = buf1469[0]
del buf1469
buf1472 = buf1468; del buf1468 # reuse
buf1476 = buf1464; del buf1464 # reuse
buf1480 = buf1460; del buf1460 # reuse
buf1488 = buf1456; del buf1456 # reuse
buf1492 = buf1454; del buf1454 # reuse
buf1496 = buf1450; del buf1450 # reuse
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_630, neg_632, neg_634, neg_636, neg_638, neg_640], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1462, buf1470, buf1466, buf1472, buf1476, buf1480, buf1488, buf1492, buf1496, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_630, max_pool3d_368], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1473 = torch.ops.aten.max_pool3d_with_indices.default(buf1472, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1472
buf1474 = buf1473[0]
del buf1473
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_632, max_pool3d_369], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1477 = torch.ops.aten.max_pool3d_with_indices.default(buf1476, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1476
buf1478 = buf1477[0]
del buf1477
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_634, max_pool3d_370], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1481 = torch.ops.aten.max_pool3d_with_indices.default(buf1480, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1480
buf1482 = buf1481[0]
del buf1481
buf1484 = buf1474; del buf1474 # reuse
# Topologically Sorted Source Nodes: [p1_105, p3_105, min_211, p2_105, min_212], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1484, buf1482, buf1478, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_105, p3_105, min_211, p2_105, min_212, img1_53], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1485 = torch.ops.aten.max_pool3d_with_indices.default(buf1484, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1486 = buf1485[0]
del buf1485
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_636, max_pool3d_372], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1489 = torch.ops.aten.max_pool3d_with_indices.default(buf1488, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1490 = buf1489[0]
del buf1489
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_638, max_pool3d_373], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1493 = torch.ops.aten.max_pool3d_with_indices.default(buf1492, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1494 = buf1493[0]
del buf1493
# Topologically Sorted Source Nodes: [p1_104, p3_104, min_209, p2_104, img_51, neg_640, max_pool3d_374], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1497 = torch.ops.aten.max_pool3d_with_indices.default(buf1496, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1498 = buf1497[0]
del buf1497
buf1500 = buf1496; del buf1496 # reuse
buf1504 = buf1492; del buf1492 # reuse
buf1508 = buf1488; del buf1488 # reuse
buf1516 = buf1484; del buf1484 # reuse
buf1520 = buf1482; del buf1482 # reuse
buf1524 = buf1478; del buf1478 # reuse
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_642, neg_644, neg_646, neg_648, neg_650, neg_652], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1490, buf1498, buf1494, buf1500, buf1504, buf1508, buf1516, buf1520, buf1524, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_642, max_pool3d_375], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1501 = torch.ops.aten.max_pool3d_with_indices.default(buf1500, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1500
buf1502 = buf1501[0]
del buf1501
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_644, max_pool3d_376], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1505 = torch.ops.aten.max_pool3d_with_indices.default(buf1504, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1504
buf1506 = buf1505[0]
del buf1505
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_646, max_pool3d_377], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1509 = torch.ops.aten.max_pool3d_with_indices.default(buf1508, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1508
buf1510 = buf1509[0]
del buf1509
buf1512 = buf1502; del buf1502 # reuse
# Topologically Sorted Source Nodes: [p1_107, p3_107, min_215, p2_107, min_216], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1512, buf1510, buf1506, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_107, p3_107, min_215, p2_107, min_216, img1_54], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1513 = torch.ops.aten.max_pool3d_with_indices.default(buf1512, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1514 = buf1513[0]
del buf1513
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_648, max_pool3d_379], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1517 = torch.ops.aten.max_pool3d_with_indices.default(buf1516, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1518 = buf1517[0]
del buf1517
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_650, max_pool3d_380], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1521 = torch.ops.aten.max_pool3d_with_indices.default(buf1520, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1522 = buf1521[0]
del buf1521
# Topologically Sorted Source Nodes: [p1_106, p3_106, min_213, p2_106, img_52, neg_652, max_pool3d_381], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1525 = torch.ops.aten.max_pool3d_with_indices.default(buf1524, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1526 = buf1525[0]
del buf1525
buf1528 = buf1524; del buf1524 # reuse
buf1532 = buf1520; del buf1520 # reuse
buf1536 = buf1516; del buf1516 # reuse
buf1544 = buf1512; del buf1512 # reuse
buf1548 = buf1510; del buf1510 # reuse
buf1552 = buf1506; del buf1506 # reuse
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_654, neg_656, neg_658, neg_660, neg_662, neg_664], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1518, buf1526, buf1522, buf1528, buf1532, buf1536, buf1544, buf1548, buf1552, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_654, max_pool3d_382], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1529 = torch.ops.aten.max_pool3d_with_indices.default(buf1528, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1528
buf1530 = buf1529[0]
del buf1529
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_656, max_pool3d_383], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1533 = torch.ops.aten.max_pool3d_with_indices.default(buf1532, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1532
buf1534 = buf1533[0]
del buf1533
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_658, max_pool3d_384], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1537 = torch.ops.aten.max_pool3d_with_indices.default(buf1536, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1536
buf1538 = buf1537[0]
del buf1537
buf1540 = buf1530; del buf1530 # reuse
# Topologically Sorted Source Nodes: [p1_109, p3_109, min_219, p2_109, min_220], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1540, buf1538, buf1534, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_109, p3_109, min_219, p2_109, min_220, img1_55], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1541 = torch.ops.aten.max_pool3d_with_indices.default(buf1540, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1542 = buf1541[0]
del buf1541
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_660, max_pool3d_386], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1545 = torch.ops.aten.max_pool3d_with_indices.default(buf1544, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1546 = buf1545[0]
del buf1545
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_662, max_pool3d_387], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1549 = torch.ops.aten.max_pool3d_with_indices.default(buf1548, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1550 = buf1549[0]
del buf1549
# Topologically Sorted Source Nodes: [p1_108, p3_108, min_217, p2_108, img_53, neg_664, max_pool3d_388], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1553 = torch.ops.aten.max_pool3d_with_indices.default(buf1552, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1554 = buf1553[0]
del buf1553
buf1556 = buf1552; del buf1552 # reuse
buf1560 = buf1548; del buf1548 # reuse
buf1564 = buf1544; del buf1544 # reuse
buf1572 = buf1540; del buf1540 # reuse
buf1576 = buf1538; del buf1538 # reuse
buf1580 = buf1534; del buf1534 # reuse
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_666, neg_668, neg_670, neg_672, neg_674, neg_676], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1546, buf1554, buf1550, buf1556, buf1560, buf1564, buf1572, buf1576, buf1580, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_666, max_pool3d_389], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1557 = torch.ops.aten.max_pool3d_with_indices.default(buf1556, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1556
buf1558 = buf1557[0]
del buf1557
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_668, max_pool3d_390], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1561 = torch.ops.aten.max_pool3d_with_indices.default(buf1560, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1560
buf1562 = buf1561[0]
del buf1561
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_670, max_pool3d_391], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1565 = torch.ops.aten.max_pool3d_with_indices.default(buf1564, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1564
buf1566 = buf1565[0]
del buf1565
buf1568 = buf1558; del buf1558 # reuse
# Topologically Sorted Source Nodes: [p1_111, p3_111, min_223, p2_111, min_224], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1568, buf1566, buf1562, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_111, p3_111, min_223, p2_111, min_224, img1_56], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1569 = torch.ops.aten.max_pool3d_with_indices.default(buf1568, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1570 = buf1569[0]
del buf1569
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_672, max_pool3d_393], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1573 = torch.ops.aten.max_pool3d_with_indices.default(buf1572, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1574 = buf1573[0]
del buf1573
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_674, max_pool3d_394], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1577 = torch.ops.aten.max_pool3d_with_indices.default(buf1576, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1578 = buf1577[0]
del buf1577
# Topologically Sorted Source Nodes: [p1_110, p3_110, min_221, p2_110, img_54, neg_676, max_pool3d_395], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1581 = torch.ops.aten.max_pool3d_with_indices.default(buf1580, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1582 = buf1581[0]
del buf1581
buf1584 = buf1580; del buf1580 # reuse
buf1588 = buf1576; del buf1576 # reuse
buf1592 = buf1572; del buf1572 # reuse
buf1600 = buf1568; del buf1568 # reuse
buf1604 = buf1566; del buf1566 # reuse
buf1608 = buf1562; del buf1562 # reuse
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_678, neg_680, neg_682, neg_684, neg_686, neg_688], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1574, buf1582, buf1578, buf1584, buf1588, buf1592, buf1600, buf1604, buf1608, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_678, max_pool3d_396], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1585 = torch.ops.aten.max_pool3d_with_indices.default(buf1584, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1584
buf1586 = buf1585[0]
del buf1585
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_680, max_pool3d_397], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1589 = torch.ops.aten.max_pool3d_with_indices.default(buf1588, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1588
buf1590 = buf1589[0]
del buf1589
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_682, max_pool3d_398], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1593 = torch.ops.aten.max_pool3d_with_indices.default(buf1592, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1592
buf1594 = buf1593[0]
del buf1593
buf1596 = buf1586; del buf1586 # reuse
# Topologically Sorted Source Nodes: [p1_113, p3_113, min_227, p2_113, min_228], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1596, buf1594, buf1590, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_113, p3_113, min_227, p2_113, min_228, img1_57], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1597 = torch.ops.aten.max_pool3d_with_indices.default(buf1596, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1598 = buf1597[0]
del buf1597
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_684, max_pool3d_400], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1601 = torch.ops.aten.max_pool3d_with_indices.default(buf1600, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1602 = buf1601[0]
del buf1601
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_686, max_pool3d_401], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1605 = torch.ops.aten.max_pool3d_with_indices.default(buf1604, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1606 = buf1605[0]
del buf1605
# Topologically Sorted Source Nodes: [p1_112, p3_112, min_225, p2_112, img_55, neg_688, max_pool3d_402], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1609 = torch.ops.aten.max_pool3d_with_indices.default(buf1608, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1610 = buf1609[0]
del buf1609
buf1612 = buf1608; del buf1608 # reuse
buf1616 = buf1604; del buf1604 # reuse
buf1620 = buf1600; del buf1600 # reuse
buf1628 = buf1596; del buf1596 # reuse
buf1632 = buf1594; del buf1594 # reuse
buf1636 = buf1590; del buf1590 # reuse
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_690, neg_692, neg_694, neg_696, neg_698, neg_700], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1602, buf1610, buf1606, buf1612, buf1616, buf1620, buf1628, buf1632, buf1636, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_690, max_pool3d_403], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1613 = torch.ops.aten.max_pool3d_with_indices.default(buf1612, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1612
buf1614 = buf1613[0]
del buf1613
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_692, max_pool3d_404], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1617 = torch.ops.aten.max_pool3d_with_indices.default(buf1616, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1616
buf1618 = buf1617[0]
del buf1617
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_694, max_pool3d_405], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1621 = torch.ops.aten.max_pool3d_with_indices.default(buf1620, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1620
buf1622 = buf1621[0]
del buf1621
buf1624 = buf1614; del buf1614 # reuse
# Topologically Sorted Source Nodes: [p1_115, p3_115, min_231, p2_115, min_232], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1624, buf1622, buf1618, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_115, p3_115, min_231, p2_115, min_232, img1_58], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1625 = torch.ops.aten.max_pool3d_with_indices.default(buf1624, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1626 = buf1625[0]
del buf1625
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_696, max_pool3d_407], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1629 = torch.ops.aten.max_pool3d_with_indices.default(buf1628, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1630 = buf1629[0]
del buf1629
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_698, max_pool3d_408], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1633 = torch.ops.aten.max_pool3d_with_indices.default(buf1632, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1634 = buf1633[0]
del buf1633
# Topologically Sorted Source Nodes: [p1_114, p3_114, min_229, p2_114, img_56, neg_700, max_pool3d_409], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1637 = torch.ops.aten.max_pool3d_with_indices.default(buf1636, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1638 = buf1637[0]
del buf1637
buf1640 = buf1636; del buf1636 # reuse
buf1644 = buf1632; del buf1632 # reuse
buf1648 = buf1628; del buf1628 # reuse
buf1656 = buf1624; del buf1624 # reuse
buf1660 = buf1622; del buf1622 # reuse
buf1664 = buf1618; del buf1618 # reuse
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_702, neg_704, neg_706, neg_708, neg_710, neg_712], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1630, buf1638, buf1634, buf1640, buf1644, buf1648, buf1656, buf1660, buf1664, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_702, max_pool3d_410], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1641 = torch.ops.aten.max_pool3d_with_indices.default(buf1640, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1640
buf1642 = buf1641[0]
del buf1641
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_704, max_pool3d_411], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1645 = torch.ops.aten.max_pool3d_with_indices.default(buf1644, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1644
buf1646 = buf1645[0]
del buf1645
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_706, max_pool3d_412], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1649 = torch.ops.aten.max_pool3d_with_indices.default(buf1648, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1648
buf1650 = buf1649[0]
del buf1649
buf1652 = buf1642; del buf1642 # reuse
# Topologically Sorted Source Nodes: [p1_117, p3_117, min_235, p2_117, min_236], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1652, buf1650, buf1646, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_117, p3_117, min_235, p2_117, min_236, img1_59], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1653 = torch.ops.aten.max_pool3d_with_indices.default(buf1652, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1654 = buf1653[0]
del buf1653
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_708, max_pool3d_414], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1657 = torch.ops.aten.max_pool3d_with_indices.default(buf1656, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1658 = buf1657[0]
del buf1657
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_710, max_pool3d_415], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1661 = torch.ops.aten.max_pool3d_with_indices.default(buf1660, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1662 = buf1661[0]
del buf1661
# Topologically Sorted Source Nodes: [p1_116, p3_116, min_233, p2_116, img_57, neg_712, max_pool3d_416], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1665 = torch.ops.aten.max_pool3d_with_indices.default(buf1664, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1666 = buf1665[0]
del buf1665
buf1668 = buf1664; del buf1664 # reuse
buf1672 = buf1660; del buf1660 # reuse
buf1676 = buf1656; del buf1656 # reuse
buf1684 = buf1652; del buf1652 # reuse
buf1688 = buf1650; del buf1650 # reuse
buf1692 = buf1646; del buf1646 # reuse
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_714, neg_716, neg_718, neg_720, neg_722, neg_724], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1658, buf1666, buf1662, buf1668, buf1672, buf1676, buf1684, buf1688, buf1692, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_714, max_pool3d_417], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1669 = torch.ops.aten.max_pool3d_with_indices.default(buf1668, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1668
buf1670 = buf1669[0]
del buf1669
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_716, max_pool3d_418], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1673 = torch.ops.aten.max_pool3d_with_indices.default(buf1672, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1672
buf1674 = buf1673[0]
del buf1673
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_718, max_pool3d_419], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1677 = torch.ops.aten.max_pool3d_with_indices.default(buf1676, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1676
buf1678 = buf1677[0]
del buf1677
buf1680 = buf1670; del buf1670 # reuse
# Topologically Sorted Source Nodes: [p1_119, p3_119, min_239, p2_119, min_240], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1680, buf1678, buf1674, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_119, p3_119, min_239, p2_119, min_240, img1_60], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1681 = torch.ops.aten.max_pool3d_with_indices.default(buf1680, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1682 = buf1681[0]
del buf1681
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_720, max_pool3d_421], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1685 = torch.ops.aten.max_pool3d_with_indices.default(buf1684, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1686 = buf1685[0]
del buf1685
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_722, max_pool3d_422], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1689 = torch.ops.aten.max_pool3d_with_indices.default(buf1688, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1690 = buf1689[0]
del buf1689
# Topologically Sorted Source Nodes: [p1_118, p3_118, min_237, p2_118, img_58, neg_724, max_pool3d_423], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1693 = torch.ops.aten.max_pool3d_with_indices.default(buf1692, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1694 = buf1693[0]
del buf1693
buf1696 = buf1692; del buf1692 # reuse
buf1700 = buf1688; del buf1688 # reuse
buf1704 = buf1684; del buf1684 # reuse
buf1712 = buf1680; del buf1680 # reuse
buf1716 = buf1678; del buf1678 # reuse
buf1720 = buf1674; del buf1674 # reuse
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_726, neg_728, neg_730, neg_732, neg_734, neg_736], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1686, buf1694, buf1690, buf1696, buf1700, buf1704, buf1712, buf1716, buf1720, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_726, max_pool3d_424], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1697 = torch.ops.aten.max_pool3d_with_indices.default(buf1696, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1696
buf1698 = buf1697[0]
del buf1697
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_728, max_pool3d_425], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1701 = torch.ops.aten.max_pool3d_with_indices.default(buf1700, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1700
buf1702 = buf1701[0]
del buf1701
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_730, max_pool3d_426], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1705 = torch.ops.aten.max_pool3d_with_indices.default(buf1704, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1704
buf1706 = buf1705[0]
del buf1705
buf1708 = buf1698; del buf1698 # reuse
# Topologically Sorted Source Nodes: [p1_121, p3_121, min_243, p2_121, min_244], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1708, buf1706, buf1702, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_121, p3_121, min_243, p2_121, min_244, img1_61], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1709 = torch.ops.aten.max_pool3d_with_indices.default(buf1708, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1710 = buf1709[0]
del buf1709
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_732, max_pool3d_428], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1713 = torch.ops.aten.max_pool3d_with_indices.default(buf1712, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1714 = buf1713[0]
del buf1713
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_734, max_pool3d_429], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1717 = torch.ops.aten.max_pool3d_with_indices.default(buf1716, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1718 = buf1717[0]
del buf1717
# Topologically Sorted Source Nodes: [p1_120, p3_120, min_241, p2_120, img_59, neg_736, max_pool3d_430], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1721 = torch.ops.aten.max_pool3d_with_indices.default(buf1720, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1722 = buf1721[0]
del buf1721
buf1724 = buf1720; del buf1720 # reuse
buf1728 = buf1716; del buf1716 # reuse
buf1732 = buf1712; del buf1712 # reuse
buf1740 = buf1708; del buf1708 # reuse
buf1744 = buf1706; del buf1706 # reuse
buf1748 = buf1702; del buf1702 # reuse
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_738, neg_740, neg_742, neg_744, neg_746, neg_748], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1714, buf1722, buf1718, buf1724, buf1728, buf1732, buf1740, buf1744, buf1748, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_738, max_pool3d_431], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1725 = torch.ops.aten.max_pool3d_with_indices.default(buf1724, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1724
buf1726 = buf1725[0]
del buf1725
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_740, max_pool3d_432], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1729 = torch.ops.aten.max_pool3d_with_indices.default(buf1728, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1728
buf1730 = buf1729[0]
del buf1729
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_742, max_pool3d_433], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1733 = torch.ops.aten.max_pool3d_with_indices.default(buf1732, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1732
buf1734 = buf1733[0]
del buf1733
buf1736 = buf1726; del buf1726 # reuse
# Topologically Sorted Source Nodes: [p1_123, p3_123, min_247, p2_123, min_248], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1736, buf1734, buf1730, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_123, p3_123, min_247, p2_123, min_248, img1_62], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1737 = torch.ops.aten.max_pool3d_with_indices.default(buf1736, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1738 = buf1737[0]
del buf1737
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_744, max_pool3d_435], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1741 = torch.ops.aten.max_pool3d_with_indices.default(buf1740, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1742 = buf1741[0]
del buf1741
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_746, max_pool3d_436], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1745 = torch.ops.aten.max_pool3d_with_indices.default(buf1744, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1746 = buf1745[0]
del buf1745
# Topologically Sorted Source Nodes: [p1_122, p3_122, min_245, p2_122, img_60, neg_748, max_pool3d_437], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1749 = torch.ops.aten.max_pool3d_with_indices.default(buf1748, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1750 = buf1749[0]
del buf1749
buf1752 = buf1748; del buf1748 # reuse
buf1756 = buf1744; del buf1744 # reuse
buf1760 = buf1740; del buf1740 # reuse
buf1768 = buf1736; del buf1736 # reuse
buf1772 = buf1734; del buf1734 # reuse
buf1776 = buf1730; del buf1730 # reuse
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_750, neg_752, neg_754, neg_756, neg_758, neg_760], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1742, buf1750, buf1746, buf1752, buf1756, buf1760, buf1768, buf1772, buf1776, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_750, max_pool3d_438], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1753 = torch.ops.aten.max_pool3d_with_indices.default(buf1752, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1752
buf1754 = buf1753[0]
del buf1753
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_752, max_pool3d_439], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1757 = torch.ops.aten.max_pool3d_with_indices.default(buf1756, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1756
buf1758 = buf1757[0]
del buf1757
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_754, max_pool3d_440], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1761 = torch.ops.aten.max_pool3d_with_indices.default(buf1760, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1760
buf1762 = buf1761[0]
del buf1761
buf1764 = buf1754; del buf1754 # reuse
# Topologically Sorted Source Nodes: [p1_125, p3_125, min_251, p2_125, min_252], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1764, buf1762, buf1758, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_125, p3_125, min_251, p2_125, min_252, img1_63], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1765 = torch.ops.aten.max_pool3d_with_indices.default(buf1764, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1766 = buf1765[0]
del buf1765
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_756, max_pool3d_442], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1769 = torch.ops.aten.max_pool3d_with_indices.default(buf1768, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1770 = buf1769[0]
del buf1769
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_758, max_pool3d_443], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1773 = torch.ops.aten.max_pool3d_with_indices.default(buf1772, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1774 = buf1773[0]
del buf1773
# Topologically Sorted Source Nodes: [p1_124, p3_124, min_249, p2_124, img_61, neg_760, max_pool3d_444], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1777 = torch.ops.aten.max_pool3d_with_indices.default(buf1776, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1778 = buf1777[0]
del buf1777
buf1780 = buf1776; del buf1776 # reuse
buf1784 = buf1772; del buf1772 # reuse
buf1788 = buf1768; del buf1768 # reuse
buf1796 = buf1764; del buf1764 # reuse
buf1800 = buf1762; del buf1762 # reuse
buf1804 = buf1758; del buf1758 # reuse
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_762, neg_764, neg_766, neg_768, neg_770, neg_772], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1770, buf1778, buf1774, buf1780, buf1784, buf1788, buf1796, buf1800, buf1804, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_762, max_pool3d_445], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1781 = torch.ops.aten.max_pool3d_with_indices.default(buf1780, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1780
buf1782 = buf1781[0]
del buf1781
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_764, max_pool3d_446], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1785 = torch.ops.aten.max_pool3d_with_indices.default(buf1784, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1784
buf1786 = buf1785[0]
del buf1785
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_766, max_pool3d_447], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1789 = torch.ops.aten.max_pool3d_with_indices.default(buf1788, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1788
buf1790 = buf1789[0]
del buf1789
buf1792 = buf1782; del buf1782 # reuse
# Topologically Sorted Source Nodes: [p1_127, p3_127, min_255, p2_127, min_256], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1792, buf1790, buf1786, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_127, p3_127, min_255, p2_127, min_256, img1_64], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1793 = torch.ops.aten.max_pool3d_with_indices.default(buf1792, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1794 = buf1793[0]
del buf1793
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_768, max_pool3d_449], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1797 = torch.ops.aten.max_pool3d_with_indices.default(buf1796, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1798 = buf1797[0]
del buf1797
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_770, max_pool3d_450], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1801 = torch.ops.aten.max_pool3d_with_indices.default(buf1800, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1802 = buf1801[0]
del buf1801
# Topologically Sorted Source Nodes: [p1_126, p3_126, min_253, p2_126, img_62, neg_772, max_pool3d_451], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1805 = torch.ops.aten.max_pool3d_with_indices.default(buf1804, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1806 = buf1805[0]
del buf1805
buf1808 = buf1804; del buf1804 # reuse
buf1812 = buf1800; del buf1800 # reuse
buf1816 = buf1796; del buf1796 # reuse
buf1824 = buf1792; del buf1792 # reuse
buf1828 = buf1790; del buf1790 # reuse
buf1832 = buf1786; del buf1786 # reuse
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_774, neg_776, neg_778, neg_780, neg_782, neg_784], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1798, buf1806, buf1802, buf1808, buf1812, buf1816, buf1824, buf1828, buf1832, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_774, max_pool3d_452], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1809 = torch.ops.aten.max_pool3d_with_indices.default(buf1808, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1808
buf1810 = buf1809[0]
del buf1809
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_776, max_pool3d_453], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1813 = torch.ops.aten.max_pool3d_with_indices.default(buf1812, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1812
buf1814 = buf1813[0]
del buf1813
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_778, max_pool3d_454], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1817 = torch.ops.aten.max_pool3d_with_indices.default(buf1816, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1816
buf1818 = buf1817[0]
del buf1817
buf1820 = buf1810; del buf1810 # reuse
# Topologically Sorted Source Nodes: [p1_129, p3_129, min_259, p2_129, min_260], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1820, buf1818, buf1814, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_129, p3_129, min_259, p2_129, min_260, img1_65], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1821 = torch.ops.aten.max_pool3d_with_indices.default(buf1820, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1822 = buf1821[0]
del buf1821
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_780, max_pool3d_456], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1825 = torch.ops.aten.max_pool3d_with_indices.default(buf1824, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1826 = buf1825[0]
del buf1825
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_782, max_pool3d_457], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1829 = torch.ops.aten.max_pool3d_with_indices.default(buf1828, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1830 = buf1829[0]
del buf1829
# Topologically Sorted Source Nodes: [p1_128, p3_128, min_257, p2_128, img_63, neg_784, max_pool3d_458], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1833 = torch.ops.aten.max_pool3d_with_indices.default(buf1832, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1834 = buf1833[0]
del buf1833
buf1836 = buf1832; del buf1832 # reuse
buf1840 = buf1828; del buf1828 # reuse
buf1844 = buf1824; del buf1824 # reuse
buf1852 = buf1820; del buf1820 # reuse
buf1856 = buf1818; del buf1818 # reuse
buf1860 = buf1814; del buf1814 # reuse
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_786, neg_788, neg_790, neg_792, neg_794, neg_796], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1826, buf1834, buf1830, buf1836, buf1840, buf1844, buf1852, buf1856, buf1860, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_786, max_pool3d_459], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1837 = torch.ops.aten.max_pool3d_with_indices.default(buf1836, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1836
buf1838 = buf1837[0]
del buf1837
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_788, max_pool3d_460], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1841 = torch.ops.aten.max_pool3d_with_indices.default(buf1840, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1840
buf1842 = buf1841[0]
del buf1841
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_790, max_pool3d_461], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1845 = torch.ops.aten.max_pool3d_with_indices.default(buf1844, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1844
buf1846 = buf1845[0]
del buf1845
buf1848 = buf1838; del buf1838 # reuse
# Topologically Sorted Source Nodes: [p1_131, p3_131, min_263, p2_131, min_264], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1848, buf1846, buf1842, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_131, p3_131, min_263, p2_131, min_264, img1_66], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1849 = torch.ops.aten.max_pool3d_with_indices.default(buf1848, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1850 = buf1849[0]
del buf1849
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_792, max_pool3d_463], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1853 = torch.ops.aten.max_pool3d_with_indices.default(buf1852, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1854 = buf1853[0]
del buf1853
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_794, max_pool3d_464], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1857 = torch.ops.aten.max_pool3d_with_indices.default(buf1856, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1858 = buf1857[0]
del buf1857
# Topologically Sorted Source Nodes: [p1_130, p3_130, min_261, p2_130, img_64, neg_796, max_pool3d_465], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1861 = torch.ops.aten.max_pool3d_with_indices.default(buf1860, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1862 = buf1861[0]
del buf1861
buf1864 = buf1860; del buf1860 # reuse
buf1868 = buf1856; del buf1856 # reuse
buf1872 = buf1852; del buf1852 # reuse
buf1880 = buf1848; del buf1848 # reuse
buf1884 = buf1846; del buf1846 # reuse
buf1888 = buf1842; del buf1842 # reuse
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_798, neg_800, neg_802, neg_804, neg_806, neg_808], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1854, buf1862, buf1858, buf1864, buf1868, buf1872, buf1880, buf1884, buf1888, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_798, max_pool3d_466], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1865 = torch.ops.aten.max_pool3d_with_indices.default(buf1864, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1864
buf1866 = buf1865[0]
del buf1865
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_800, max_pool3d_467], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1869 = torch.ops.aten.max_pool3d_with_indices.default(buf1868, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1868
buf1870 = buf1869[0]
del buf1869
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_802, max_pool3d_468], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1873 = torch.ops.aten.max_pool3d_with_indices.default(buf1872, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1872
buf1874 = buf1873[0]
del buf1873
buf1876 = buf1866; del buf1866 # reuse
# Topologically Sorted Source Nodes: [p1_133, p3_133, min_267, p2_133, min_268], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1876, buf1874, buf1870, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_133, p3_133, min_267, p2_133, min_268, img1_67], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1877 = torch.ops.aten.max_pool3d_with_indices.default(buf1876, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1878 = buf1877[0]
del buf1877
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_804, max_pool3d_470], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1881 = torch.ops.aten.max_pool3d_with_indices.default(buf1880, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1882 = buf1881[0]
del buf1881
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_806, max_pool3d_471], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1885 = torch.ops.aten.max_pool3d_with_indices.default(buf1884, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1886 = buf1885[0]
del buf1885
# Topologically Sorted Source Nodes: [p1_132, p3_132, min_265, p2_132, img_65, neg_808, max_pool3d_472], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1889 = torch.ops.aten.max_pool3d_with_indices.default(buf1888, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1890 = buf1889[0]
del buf1889
buf1892 = buf1888; del buf1888 # reuse
buf1896 = buf1884; del buf1884 # reuse
buf1900 = buf1880; del buf1880 # reuse
buf1908 = buf1876; del buf1876 # reuse
buf1912 = buf1874; del buf1874 # reuse
buf1916 = buf1870; del buf1870 # reuse
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_810, neg_812, neg_814, neg_816, neg_818, neg_820], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1882, buf1890, buf1886, buf1892, buf1896, buf1900, buf1908, buf1912, buf1916, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_810, max_pool3d_473], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1893 = torch.ops.aten.max_pool3d_with_indices.default(buf1892, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1892
buf1894 = buf1893[0]
del buf1893
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_812, max_pool3d_474], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1897 = torch.ops.aten.max_pool3d_with_indices.default(buf1896, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1896
buf1898 = buf1897[0]
del buf1897
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_814, max_pool3d_475], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1901 = torch.ops.aten.max_pool3d_with_indices.default(buf1900, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1900
buf1902 = buf1901[0]
del buf1901
buf1904 = buf1894; del buf1894 # reuse
# Topologically Sorted Source Nodes: [p1_135, p3_135, min_271, p2_135, min_272], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1904, buf1902, buf1898, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_135, p3_135, min_271, p2_135, min_272, img1_68], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1905 = torch.ops.aten.max_pool3d_with_indices.default(buf1904, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1906 = buf1905[0]
del buf1905
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_816, max_pool3d_477], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1909 = torch.ops.aten.max_pool3d_with_indices.default(buf1908, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1910 = buf1909[0]
del buf1909
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_818, max_pool3d_478], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1913 = torch.ops.aten.max_pool3d_with_indices.default(buf1912, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1914 = buf1913[0]
del buf1913
# Topologically Sorted Source Nodes: [p1_134, p3_134, min_269, p2_134, img_66, neg_820, max_pool3d_479], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1917 = torch.ops.aten.max_pool3d_with_indices.default(buf1916, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1918 = buf1917[0]
del buf1917
buf1920 = buf1916; del buf1916 # reuse
buf1924 = buf1912; del buf1912 # reuse
buf1928 = buf1908; del buf1908 # reuse
buf1936 = buf1904; del buf1904 # reuse
buf1940 = buf1902; del buf1902 # reuse
buf1944 = buf1898; del buf1898 # reuse
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_822, neg_824, neg_826, neg_828, neg_830, neg_832], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1910, buf1918, buf1914, buf1920, buf1924, buf1928, buf1936, buf1940, buf1944, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_822, max_pool3d_480], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1921 = torch.ops.aten.max_pool3d_with_indices.default(buf1920, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1920
buf1922 = buf1921[0]
del buf1921
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_824, max_pool3d_481], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1925 = torch.ops.aten.max_pool3d_with_indices.default(buf1924, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1924
buf1926 = buf1925[0]
del buf1925
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_826, max_pool3d_482], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1929 = torch.ops.aten.max_pool3d_with_indices.default(buf1928, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1928
buf1930 = buf1929[0]
del buf1929
buf1932 = buf1922; del buf1922 # reuse
# Topologically Sorted Source Nodes: [p1_137, p3_137, min_275, p2_137, min_276], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1932, buf1930, buf1926, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_137, p3_137, min_275, p2_137, min_276, img1_69], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1933 = torch.ops.aten.max_pool3d_with_indices.default(buf1932, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1934 = buf1933[0]
del buf1933
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_828, max_pool3d_484], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1937 = torch.ops.aten.max_pool3d_with_indices.default(buf1936, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1938 = buf1937[0]
del buf1937
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_830, max_pool3d_485], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1941 = torch.ops.aten.max_pool3d_with_indices.default(buf1940, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1942 = buf1941[0]
del buf1941
# Topologically Sorted Source Nodes: [p1_136, p3_136, min_273, p2_136, img_67, neg_832, max_pool3d_486], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1945 = torch.ops.aten.max_pool3d_with_indices.default(buf1944, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1946 = buf1945[0]
del buf1945
buf1948 = buf1944; del buf1944 # reuse
buf1952 = buf1940; del buf1940 # reuse
buf1956 = buf1936; del buf1936 # reuse
buf1964 = buf1932; del buf1932 # reuse
buf1968 = buf1930; del buf1930 # reuse
buf1972 = buf1926; del buf1926 # reuse
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_834, neg_836, neg_838, neg_840, neg_842, neg_844], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1938, buf1946, buf1942, buf1948, buf1952, buf1956, buf1964, buf1968, buf1972, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_834, max_pool3d_487], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1949 = torch.ops.aten.max_pool3d_with_indices.default(buf1948, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1948
buf1950 = buf1949[0]
del buf1949
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_836, max_pool3d_488], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1953 = torch.ops.aten.max_pool3d_with_indices.default(buf1952, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1952
buf1954 = buf1953[0]
del buf1953
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_838, max_pool3d_489], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1957 = torch.ops.aten.max_pool3d_with_indices.default(buf1956, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1956
buf1958 = buf1957[0]
del buf1957
buf1960 = buf1950; del buf1950 # reuse
# Topologically Sorted Source Nodes: [p1_139, p3_139, min_279, p2_139, min_280], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1960, buf1958, buf1954, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_139, p3_139, min_279, p2_139, min_280, img1_70], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1961 = torch.ops.aten.max_pool3d_with_indices.default(buf1960, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1962 = buf1961[0]
del buf1961
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_840, max_pool3d_491], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1965 = torch.ops.aten.max_pool3d_with_indices.default(buf1964, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1966 = buf1965[0]
del buf1965
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_842, max_pool3d_492], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1969 = torch.ops.aten.max_pool3d_with_indices.default(buf1968, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1970 = buf1969[0]
del buf1969
# Topologically Sorted Source Nodes: [p1_138, p3_138, min_277, p2_138, img_68, neg_844, max_pool3d_493], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1973 = torch.ops.aten.max_pool3d_with_indices.default(buf1972, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1974 = buf1973[0]
del buf1973
buf1976 = buf1972; del buf1972 # reuse
buf1980 = buf1968; del buf1968 # reuse
buf1984 = buf1964; del buf1964 # reuse
buf1992 = buf1960; del buf1960 # reuse
buf1996 = buf1958; del buf1958 # reuse
buf2000 = buf1954; del buf1954 # reuse
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_846, neg_848, neg_850, neg_852, neg_854, neg_856], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1966, buf1974, buf1970, buf1976, buf1980, buf1984, buf1992, buf1996, buf2000, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_846, max_pool3d_494], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1977 = torch.ops.aten.max_pool3d_with_indices.default(buf1976, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1976
buf1978 = buf1977[0]
del buf1977
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_848, max_pool3d_495], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1981 = torch.ops.aten.max_pool3d_with_indices.default(buf1980, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1980
buf1982 = buf1981[0]
del buf1981
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_850, max_pool3d_496], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1985 = torch.ops.aten.max_pool3d_with_indices.default(buf1984, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1984
buf1986 = buf1985[0]
del buf1985
buf1988 = buf1978; del buf1978 # reuse
# Topologically Sorted Source Nodes: [p1_141, p3_141, min_283, p2_141, min_284], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf1988, buf1986, buf1982, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_141, p3_141, min_283, p2_141, min_284, img1_71], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1989 = torch.ops.aten.max_pool3d_with_indices.default(buf1988, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1990 = buf1989[0]
del buf1989
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_852, max_pool3d_498], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1993 = torch.ops.aten.max_pool3d_with_indices.default(buf1992, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1994 = buf1993[0]
del buf1993
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_854, max_pool3d_499], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf1997 = torch.ops.aten.max_pool3d_with_indices.default(buf1996, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1998 = buf1997[0]
del buf1997
# Topologically Sorted Source Nodes: [p1_140, p3_140, min_281, p2_140, img_69, neg_856, max_pool3d_500], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2001 = torch.ops.aten.max_pool3d_with_indices.default(buf2000, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2002 = buf2001[0]
del buf2001
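# Memory notes: the `bufX = bufY; del bufY  # reuse` groups rebind an existing
# allocation instead of calling empty_strided_cuda again, and the bare `del`
# statements drop the last reference to each pool result as soon as it has
# been consumed, keeping peak GPU memory flat despite the full unrolling.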
buf2004 = buf2000; del buf2000 # reuse
buf2008 = buf1996; del buf1996 # reuse
buf2012 = buf1992; del buf1992 # reuse
buf2020 = buf1988; del buf1988 # reuse
buf2024 = buf1986; del buf1986 # reuse
buf2028 = buf1982; del buf1982 # reuse
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_858, neg_860, neg_862, neg_864, neg_866, neg_868], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf1994, buf2002, buf1998, buf2004, buf2008, buf2012, buf2020, buf2024, buf2028, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_858, max_pool3d_501], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2005 = torch.ops.aten.max_pool3d_with_indices.default(buf2004, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2004
buf2006 = buf2005[0]
del buf2005
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_860, max_pool3d_502], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2009 = torch.ops.aten.max_pool3d_with_indices.default(buf2008, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2008
buf2010 = buf2009[0]
del buf2009
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_862, max_pool3d_503], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2013 = torch.ops.aten.max_pool3d_with_indices.default(buf2012, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2012
buf2014 = buf2013[0]
del buf2013
buf2016 = buf2006; del buf2006 # reuse
# Topologically Sorted Source Nodes: [p1_143, p3_143, min_287, p2_143, min_288], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2016, buf2014, buf2010, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_143, p3_143, min_287, p2_143, min_288, img1_72], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2017 = torch.ops.aten.max_pool3d_with_indices.default(buf2016, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2018 = buf2017[0]
del buf2017
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_864, max_pool3d_505], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2021 = torch.ops.aten.max_pool3d_with_indices.default(buf2020, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2022 = buf2021[0]
del buf2021
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_866, max_pool3d_506], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2025 = torch.ops.aten.max_pool3d_with_indices.default(buf2024, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2026 = buf2025[0]
del buf2025
# Topologically Sorted Source Nodes: [p1_142, p3_142, min_285, p2_142, img_70, neg_868, max_pool3d_507], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2029 = torch.ops.aten.max_pool3d_with_indices.default(buf2028, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2030 = buf2029[0]
del buf2029
buf2032 = buf2028; del buf2028 # reuse
buf2036 = buf2024; del buf2024 # reuse
buf2040 = buf2020; del buf2020 # reuse
buf2048 = buf2016; del buf2016 # reuse
buf2052 = buf2014; del buf2014 # reuse
buf2056 = buf2010; del buf2010 # reuse
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_870, neg_872, neg_874, neg_876, neg_878, neg_880], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2022, buf2030, buf2026, buf2032, buf2036, buf2040, buf2048, buf2052, buf2056, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_870, max_pool3d_508], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2033 = torch.ops.aten.max_pool3d_with_indices.default(buf2032, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2032
buf2034 = buf2033[0]
del buf2033
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_872, max_pool3d_509], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2037 = torch.ops.aten.max_pool3d_with_indices.default(buf2036, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2036
buf2038 = buf2037[0]
del buf2037
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_874, max_pool3d_510], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2041 = torch.ops.aten.max_pool3d_with_indices.default(buf2040, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2040
buf2042 = buf2041[0]
del buf2041
buf2044 = buf2034; del buf2034 # reuse
# Topologically Sorted Source Nodes: [p1_145, p3_145, min_291, p2_145, min_292], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2044, buf2042, buf2038, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_145, p3_145, min_291, p2_145, min_292, img1_73], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2045 = torch.ops.aten.max_pool3d_with_indices.default(buf2044, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2046 = buf2045[0]
del buf2045
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_876, max_pool3d_512], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2049 = torch.ops.aten.max_pool3d_with_indices.default(buf2048, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2050 = buf2049[0]
del buf2049
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_878, max_pool3d_513], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2053 = torch.ops.aten.max_pool3d_with_indices.default(buf2052, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2054 = buf2053[0]
del buf2053
# Topologically Sorted Source Nodes: [p1_144, p3_144, min_289, p2_144, img_71, neg_880, max_pool3d_514], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2057 = torch.ops.aten.max_pool3d_with_indices.default(buf2056, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2058 = buf2057[0]
del buf2057
buf2060 = buf2056; del buf2056 # reuse
buf2064 = buf2052; del buf2052 # reuse
buf2068 = buf2048; del buf2048 # reuse
buf2076 = buf2044; del buf2044 # reuse
buf2080 = buf2042; del buf2042 # reuse
buf2084 = buf2038; del buf2038 # reuse
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_882, neg_884, neg_886, neg_888, neg_890, neg_892], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2050, buf2058, buf2054, buf2060, buf2064, buf2068, buf2076, buf2080, buf2084, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_882, max_pool3d_515], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2061 = torch.ops.aten.max_pool3d_with_indices.default(buf2060, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2060
buf2062 = buf2061[0]
del buf2061
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_884, max_pool3d_516], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2065 = torch.ops.aten.max_pool3d_with_indices.default(buf2064, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2064
buf2066 = buf2065[0]
del buf2065
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_886, max_pool3d_517], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2069 = torch.ops.aten.max_pool3d_with_indices.default(buf2068, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2068
buf2070 = buf2069[0]
del buf2069
buf2072 = buf2062; del buf2062 # reuse
# Topologically Sorted Source Nodes: [p1_147, p3_147, min_295, p2_147, min_296], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2072, buf2070, buf2066, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_147, p3_147, min_295, p2_147, min_296, img1_74], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2073 = torch.ops.aten.max_pool3d_with_indices.default(buf2072, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2074 = buf2073[0]
del buf2073
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_888, max_pool3d_519], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2077 = torch.ops.aten.max_pool3d_with_indices.default(buf2076, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2078 = buf2077[0]
del buf2077
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_890, max_pool3d_520], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2081 = torch.ops.aten.max_pool3d_with_indices.default(buf2080, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2082 = buf2081[0]
del buf2081
# Topologically Sorted Source Nodes: [p1_146, p3_146, min_293, p2_146, img_72, neg_892, max_pool3d_521], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2085 = torch.ops.aten.max_pool3d_with_indices.default(buf2084, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2086 = buf2085[0]
del buf2085
buf2088 = buf2084; del buf2084 # reuse
buf2092 = buf2080; del buf2080 # reuse
buf2096 = buf2076; del buf2076 # reuse
buf2104 = buf2072; del buf2072 # reuse
buf2108 = buf2070; del buf2070 # reuse
buf2112 = buf2066; del buf2066 # reuse
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_894, neg_896, neg_898, neg_900, neg_902, neg_904], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2078, buf2086, buf2082, buf2088, buf2092, buf2096, buf2104, buf2108, buf2112, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_894, max_pool3d_522], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2089 = torch.ops.aten.max_pool3d_with_indices.default(buf2088, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2088
buf2090 = buf2089[0]
del buf2089
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_896, max_pool3d_523], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2093 = torch.ops.aten.max_pool3d_with_indices.default(buf2092, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2092
buf2094 = buf2093[0]
del buf2093
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_898, max_pool3d_524], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2097 = torch.ops.aten.max_pool3d_with_indices.default(buf2096, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2096
buf2098 = buf2097[0]
del buf2097
buf2100 = buf2090; del buf2090 # reuse
# Topologically Sorted Source Nodes: [p1_149, p3_149, min_299, p2_149, min_300], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2100, buf2098, buf2094, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_149, p3_149, min_299, p2_149, min_300, img1_75], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2101 = torch.ops.aten.max_pool3d_with_indices.default(buf2100, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2102 = buf2101[0]
del buf2101
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_900, max_pool3d_526], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2105 = torch.ops.aten.max_pool3d_with_indices.default(buf2104, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2106 = buf2105[0]
del buf2105
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_902, max_pool3d_527], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2109 = torch.ops.aten.max_pool3d_with_indices.default(buf2108, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2110 = buf2109[0]
del buf2109
# Topologically Sorted Source Nodes: [p1_148, p3_148, min_297, p2_148, img_73, neg_904, max_pool3d_528], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2113 = torch.ops.aten.max_pool3d_with_indices.default(buf2112, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2114 = buf2113[0]
del buf2113
buf2116 = buf2112; del buf2112 # reuse
buf2120 = buf2108; del buf2108 # reuse
buf2124 = buf2104; del buf2104 # reuse
buf2132 = buf2100; del buf2100 # reuse
buf2136 = buf2098; del buf2098 # reuse
buf2140 = buf2094; del buf2094 # reuse
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_906, neg_908, neg_910, neg_912, neg_914, neg_916], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2106, buf2114, buf2110, buf2116, buf2120, buf2124, buf2132, buf2136, buf2140, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_906, max_pool3d_529], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2117 = torch.ops.aten.max_pool3d_with_indices.default(buf2116, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2116
buf2118 = buf2117[0]
del buf2117
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_908, max_pool3d_530], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2121 = torch.ops.aten.max_pool3d_with_indices.default(buf2120, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2120
buf2122 = buf2121[0]
del buf2121
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_910, max_pool3d_531], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2125 = torch.ops.aten.max_pool3d_with_indices.default(buf2124, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2124
buf2126 = buf2125[0]
del buf2125
buf2128 = buf2118; del buf2118 # reuse
# Topologically Sorted Source Nodes: [p1_151, p3_151, min_303, p2_151, min_304], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2128, buf2126, buf2122, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_151, p3_151, min_303, p2_151, min_304, img1_76], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2129 = torch.ops.aten.max_pool3d_with_indices.default(buf2128, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2130 = buf2129[0]
del buf2129
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_912, max_pool3d_533], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2133 = torch.ops.aten.max_pool3d_with_indices.default(buf2132, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2134 = buf2133[0]
del buf2133
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_914, max_pool3d_534], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2137 = torch.ops.aten.max_pool3d_with_indices.default(buf2136, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2138 = buf2137[0]
del buf2137
# Topologically Sorted Source Nodes: [p1_150, p3_150, min_301, p2_150, img_74, neg_916, max_pool3d_535], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2141 = torch.ops.aten.max_pool3d_with_indices.default(buf2140, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2142 = buf2141[0]
del buf2141
buf2144 = buf2140; del buf2140 # reuse
buf2148 = buf2136; del buf2136 # reuse
buf2152 = buf2132; del buf2132 # reuse
buf2160 = buf2128; del buf2128 # reuse
buf2164 = buf2126; del buf2126 # reuse
buf2168 = buf2122; del buf2122 # reuse
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_918, neg_920, neg_922, neg_924, neg_926, neg_928], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2134, buf2142, buf2138, buf2144, buf2148, buf2152, buf2160, buf2164, buf2168, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_918, max_pool3d_536], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2145 = torch.ops.aten.max_pool3d_with_indices.default(buf2144, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2144
buf2146 = buf2145[0]
del buf2145
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_920, max_pool3d_537], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2149 = torch.ops.aten.max_pool3d_with_indices.default(buf2148, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2148
buf2150 = buf2149[0]
del buf2149
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_922, max_pool3d_538], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2153 = torch.ops.aten.max_pool3d_with_indices.default(buf2152, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2152
buf2154 = buf2153[0]
del buf2153
buf2156 = buf2146; del buf2146 # reuse
# Topologically Sorted Source Nodes: [p1_153, p3_153, min_307, p2_153, min_308], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2156, buf2154, buf2150, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_153, p3_153, min_307, p2_153, min_308, img1_77], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2157 = torch.ops.aten.max_pool3d_with_indices.default(buf2156, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2158 = buf2157[0]
del buf2157
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_924, max_pool3d_540], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2161 = torch.ops.aten.max_pool3d_with_indices.default(buf2160, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2162 = buf2161[0]
del buf2161
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_926, max_pool3d_541], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2165 = torch.ops.aten.max_pool3d_with_indices.default(buf2164, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2166 = buf2165[0]
del buf2165
# Topologically Sorted Source Nodes: [p1_152, p3_152, min_305, p2_152, img_75, neg_928, max_pool3d_542], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2169 = torch.ops.aten.max_pool3d_with_indices.default(buf2168, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2170 = buf2169[0]
del buf2169
buf2172 = buf2168; del buf2168 # reuse
buf2176 = buf2164; del buf2164 # reuse
buf2180 = buf2160; del buf2160 # reuse
buf2188 = buf2156; del buf2156 # reuse
buf2192 = buf2154; del buf2154 # reuse
buf2196 = buf2150; del buf2150 # reuse
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_930, neg_932, neg_934, neg_936, neg_938, neg_940], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2162, buf2170, buf2166, buf2172, buf2176, buf2180, buf2188, buf2192, buf2196, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_930, max_pool3d_543], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2173 = torch.ops.aten.max_pool3d_with_indices.default(buf2172, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2172
buf2174 = buf2173[0]
del buf2173
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_932, max_pool3d_544], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2177 = torch.ops.aten.max_pool3d_with_indices.default(buf2176, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2176
buf2178 = buf2177[0]
del buf2177
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_934, max_pool3d_545], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2181 = torch.ops.aten.max_pool3d_with_indices.default(buf2180, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2180
buf2182 = buf2181[0]
del buf2181
buf2184 = buf2174; del buf2174 # reuse
# Topologically Sorted Source Nodes: [p1_155, p3_155, min_311, p2_155, min_312], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2184, buf2182, buf2178, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_155, p3_155, min_311, p2_155, min_312, img1_78], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2185 = torch.ops.aten.max_pool3d_with_indices.default(buf2184, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2186 = buf2185[0]
del buf2185
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_936, max_pool3d_547], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2189 = torch.ops.aten.max_pool3d_with_indices.default(buf2188, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2190 = buf2189[0]
del buf2189
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_938, max_pool3d_548], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2193 = torch.ops.aten.max_pool3d_with_indices.default(buf2192, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2194 = buf2193[0]
del buf2193
# Topologically Sorted Source Nodes: [p1_154, p3_154, min_309, p2_154, img_76, neg_940, max_pool3d_549], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2197 = torch.ops.aten.max_pool3d_with_indices.default(buf2196, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2198 = buf2197[0]
del buf2197
buf2200 = buf2196; del buf2196 # reuse
buf2204 = buf2192; del buf2192 # reuse
buf2208 = buf2188; del buf2188 # reuse
buf2216 = buf2184; del buf2184 # reuse
buf2220 = buf2182; del buf2182 # reuse
buf2224 = buf2178; del buf2178 # reuse
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_942, neg_944, neg_946, neg_948, neg_950, neg_952], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2190, buf2198, buf2194, buf2200, buf2204, buf2208, buf2216, buf2220, buf2224, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_942, max_pool3d_550], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2201 = torch.ops.aten.max_pool3d_with_indices.default(buf2200, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2200
buf2202 = buf2201[0]
del buf2201
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_944, max_pool3d_551], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2205 = torch.ops.aten.max_pool3d_with_indices.default(buf2204, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2204
buf2206 = buf2205[0]
del buf2205
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_946, max_pool3d_552], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2209 = torch.ops.aten.max_pool3d_with_indices.default(buf2208, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2208
buf2210 = buf2209[0]
del buf2209
buf2212 = buf2202; del buf2202 # reuse
# Topologically Sorted Source Nodes: [p1_157, p3_157, min_315, p2_157, min_316], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2212, buf2210, buf2206, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_157, p3_157, min_315, p2_157, min_316, img1_79], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2213 = torch.ops.aten.max_pool3d_with_indices.default(buf2212, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2214 = buf2213[0]
del buf2213
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_948, max_pool3d_554], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2217 = torch.ops.aten.max_pool3d_with_indices.default(buf2216, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2218 = buf2217[0]
del buf2217
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_950, max_pool3d_555], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2221 = torch.ops.aten.max_pool3d_with_indices.default(buf2220, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2222 = buf2221[0]
del buf2221
# Topologically Sorted Source Nodes: [p1_156, p3_156, min_313, p2_156, img_77, neg_952, max_pool3d_556], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2225 = torch.ops.aten.max_pool3d_with_indices.default(buf2224, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2226 = buf2225[0]
del buf2225
buf2228 = buf2224; del buf2224 # reuse
buf2232 = buf2220; del buf2220 # reuse
buf2236 = buf2216; del buf2216 # reuse
buf2244 = buf2212; del buf2212 # reuse
buf2248 = buf2210; del buf2210 # reuse
buf2252 = buf2206; del buf2206 # reuse
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_954, neg_956, neg_958, neg_960, neg_962, neg_964], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2218, buf2226, buf2222, buf2228, buf2232, buf2236, buf2244, buf2248, buf2252, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_954, max_pool3d_557], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2229 = torch.ops.aten.max_pool3d_with_indices.default(buf2228, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2228
buf2230 = buf2229[0]
del buf2229
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_956, max_pool3d_558], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2233 = torch.ops.aten.max_pool3d_with_indices.default(buf2232, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2232
buf2234 = buf2233[0]
del buf2233
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_958, max_pool3d_559], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2237 = torch.ops.aten.max_pool3d_with_indices.default(buf2236, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2236
buf2238 = buf2237[0]
del buf2237
buf2240 = buf2230; del buf2230 # reuse
# Topologically Sorted Source Nodes: [p1_159, p3_159, min_319, p2_159, min_320], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2240, buf2238, buf2234, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_159, p3_159, min_319, p2_159, min_320, img1_80], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2241 = torch.ops.aten.max_pool3d_with_indices.default(buf2240, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2242 = buf2241[0]
del buf2241
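# The same erode/open block repeats below with fresh buffer names (img1_81
# onward); only the auto-numbered suffixes on neg_*, min_*, and max_pool3d_*
# advance.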
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_960, max_pool3d_561], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2245 = torch.ops.aten.max_pool3d_with_indices.default(buf2244, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2246 = buf2245[0]
del buf2245
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_962, max_pool3d_562], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2249 = torch.ops.aten.max_pool3d_with_indices.default(buf2248, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2250 = buf2249[0]
del buf2249
# Topologically Sorted Source Nodes: [p1_158, p3_158, min_317, p2_158, img_78, neg_964, max_pool3d_563], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2253 = torch.ops.aten.max_pool3d_with_indices.default(buf2252, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2254 = buf2253[0]
del buf2253
buf2256 = buf2252; del buf2252 # reuse
buf2260 = buf2248; del buf2248 # reuse
buf2264 = buf2244; del buf2244 # reuse
buf2272 = buf2240; del buf2240 # reuse
buf2276 = buf2238; del buf2238 # reuse
buf2280 = buf2234; del buf2234 # reuse
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_966, neg_968, neg_970, neg_972, neg_974, neg_976], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2246, buf2254, buf2250, buf2256, buf2260, buf2264, buf2272, buf2276, buf2280, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_966, max_pool3d_564], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2257 = torch.ops.aten.max_pool3d_with_indices.default(buf2256, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2256
buf2258 = buf2257[0]
del buf2257
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_968, max_pool3d_565], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2261 = torch.ops.aten.max_pool3d_with_indices.default(buf2260, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2260
buf2262 = buf2261[0]
del buf2261
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_970, max_pool3d_566], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2265 = torch.ops.aten.max_pool3d_with_indices.default(buf2264, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2264
buf2266 = buf2265[0]
del buf2265
buf2268 = buf2258; del buf2258 # reuse
# Topologically Sorted Source Nodes: [p1_161, p3_161, min_323, p2_161, min_324], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2268, buf2266, buf2262, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_161, p3_161, min_323, p2_161, min_324, img1_81], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2269 = torch.ops.aten.max_pool3d_with_indices.default(buf2268, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2270 = buf2269[0]
del buf2269
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_972, max_pool3d_568], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2273 = torch.ops.aten.max_pool3d_with_indices.default(buf2272, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2274 = buf2273[0]
del buf2273
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_974, max_pool3d_569], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2277 = torch.ops.aten.max_pool3d_with_indices.default(buf2276, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2278 = buf2277[0]
del buf2277
# Topologically Sorted Source Nodes: [p1_160, p3_160, min_321, p2_160, img_79, neg_976, max_pool3d_570], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2281 = torch.ops.aten.max_pool3d_with_indices.default(buf2280, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2282 = buf2281[0]
del buf2281
buf2284 = buf2280; del buf2280 # reuse
buf2288 = buf2276; del buf2276 # reuse
buf2292 = buf2272; del buf2272 # reuse
buf2300 = buf2268; del buf2268 # reuse
buf2304 = buf2266; del buf2266 # reuse
buf2308 = buf2262; del buf2262 # reuse
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_978, neg_980, neg_982, neg_984, neg_986, neg_988], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2274, buf2282, buf2278, buf2284, buf2288, buf2292, buf2300, buf2304, buf2308, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_978, max_pool3d_571], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2285 = torch.ops.aten.max_pool3d_with_indices.default(buf2284, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2284
buf2286 = buf2285[0]
del buf2285
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_980, max_pool3d_572], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2289 = torch.ops.aten.max_pool3d_with_indices.default(buf2288, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2288
buf2290 = buf2289[0]
del buf2289
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_982, max_pool3d_573], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2293 = torch.ops.aten.max_pool3d_with_indices.default(buf2292, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2292
buf2294 = buf2293[0]
del buf2293
buf2296 = buf2286; del buf2286 # reuse
# Topologically Sorted Source Nodes: [p1_163, p3_163, min_327, p2_163, min_328], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2296, buf2294, buf2290, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_163, p3_163, min_327, p2_163, min_328, img1_82], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2297 = torch.ops.aten.max_pool3d_with_indices.default(buf2296, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2298 = buf2297[0]
del buf2297
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_984, max_pool3d_575], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2301 = torch.ops.aten.max_pool3d_with_indices.default(buf2300, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2302 = buf2301[0]
del buf2301
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_986, max_pool3d_576], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2305 = torch.ops.aten.max_pool3d_with_indices.default(buf2304, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2306 = buf2305[0]
del buf2305
# Topologically Sorted Source Nodes: [p1_162, p3_162, min_325, p2_162, img_80, neg_988, max_pool3d_577], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2309 = torch.ops.aten.max_pool3d_with_indices.default(buf2308, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2310 = buf2309[0]
del buf2309
buf2312 = buf2308; del buf2308 # reuse
buf2316 = buf2304; del buf2304 # reuse
buf2320 = buf2300; del buf2300 # reuse
buf2328 = buf2296; del buf2296 # reuse
buf2332 = buf2294; del buf2294 # reuse
buf2336 = buf2290; del buf2290 # reuse
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_990, neg_992, neg_994, neg_996, neg_998, neg_1000], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2302, buf2310, buf2306, buf2312, buf2316, buf2320, buf2328, buf2332, buf2336, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_990, max_pool3d_578], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2313 = torch.ops.aten.max_pool3d_with_indices.default(buf2312, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2312
buf2314 = buf2313[0]
del buf2313
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_992, max_pool3d_579], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2317 = torch.ops.aten.max_pool3d_with_indices.default(buf2316, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2316
buf2318 = buf2317[0]
del buf2317
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_994, max_pool3d_580], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2321 = torch.ops.aten.max_pool3d_with_indices.default(buf2320, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2320
buf2322 = buf2321[0]
del buf2321
buf2324 = buf2314; del buf2314 # reuse
# Topologically Sorted Source Nodes: [p1_165, p3_165, min_331, p2_165, min_332], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2324, buf2322, buf2318, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_165, p3_165, min_331, p2_165, min_332, img1_83], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2325 = torch.ops.aten.max_pool3d_with_indices.default(buf2324, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2326 = buf2325[0]
del buf2325
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_996, max_pool3d_582], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2329 = torch.ops.aten.max_pool3d_with_indices.default(buf2328, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2330 = buf2329[0]
del buf2329
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_998, max_pool3d_583], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2333 = torch.ops.aten.max_pool3d_with_indices.default(buf2332, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2334 = buf2333[0]
del buf2333
# Topologically Sorted Source Nodes: [p1_164, p3_164, min_329, p2_164, img_81, neg_1000, max_pool3d_584], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2337 = torch.ops.aten.max_pool3d_with_indices.default(buf2336, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2338 = buf2337[0]
del buf2337
buf2340 = buf2336; del buf2336 # reuse
buf2344 = buf2332; del buf2332 # reuse
buf2348 = buf2328; del buf2328 # reuse
buf2356 = buf2324; del buf2324 # reuse
buf2360 = buf2322; del buf2322 # reuse
buf2364 = buf2318; del buf2318 # reuse
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1002, neg_1004, neg_1006, neg_1008, neg_1010, neg_1012], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2330, buf2338, buf2334, buf2340, buf2344, buf2348, buf2356, buf2360, buf2364, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1002, max_pool3d_585], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2341 = torch.ops.aten.max_pool3d_with_indices.default(buf2340, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2340
buf2342 = buf2341[0]
del buf2341
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1004, max_pool3d_586], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2345 = torch.ops.aten.max_pool3d_with_indices.default(buf2344, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2344
buf2346 = buf2345[0]
del buf2345
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1006, max_pool3d_587], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2349 = torch.ops.aten.max_pool3d_with_indices.default(buf2348, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2348
buf2350 = buf2349[0]
del buf2349
buf2352 = buf2342; del buf2342 # reuse
# Topologically Sorted Source Nodes: [p1_167, p3_167, min_335, p2_167, min_336], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2352, buf2350, buf2346, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_167, p3_167, min_335, p2_167, min_336, img1_84], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2353 = torch.ops.aten.max_pool3d_with_indices.default(buf2352, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2354 = buf2353[0]
del buf2353
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1008, max_pool3d_589], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2357 = torch.ops.aten.max_pool3d_with_indices.default(buf2356, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2358 = buf2357[0]
del buf2357
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1010, max_pool3d_590], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2361 = torch.ops.aten.max_pool3d_with_indices.default(buf2360, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2362 = buf2361[0]
del buf2361
# Topologically Sorted Source Nodes: [p1_166, p3_166, min_333, p2_166, img_82, neg_1012, max_pool3d_591], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2365 = torch.ops.aten.max_pool3d_with_indices.default(buf2364, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2366 = buf2365[0]
del buf2365
buf2368 = buf2364; del buf2364 # reuse
buf2372 = buf2360; del buf2360 # reuse
buf2376 = buf2356; del buf2356 # reuse
buf2384 = buf2352; del buf2352 # reuse
buf2388 = buf2350; del buf2350 # reuse
buf2392 = buf2346; del buf2346 # reuse
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1014, neg_1016, neg_1018, neg_1020, neg_1022, neg_1024], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2358, buf2366, buf2362, buf2368, buf2372, buf2376, buf2384, buf2388, buf2392, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1014, max_pool3d_592], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2369 = torch.ops.aten.max_pool3d_with_indices.default(buf2368, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2368
buf2370 = buf2369[0]
del buf2369
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1016, max_pool3d_593], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2373 = torch.ops.aten.max_pool3d_with_indices.default(buf2372, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2372
buf2374 = buf2373[0]
del buf2373
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1018, max_pool3d_594], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2377 = torch.ops.aten.max_pool3d_with_indices.default(buf2376, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2376
buf2378 = buf2377[0]
del buf2377
buf2380 = buf2370; del buf2370 # reuse
# Topologically Sorted Source Nodes: [p1_169, p3_169, min_339, p2_169, min_340], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2380, buf2378, buf2374, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_169, p3_169, min_339, p2_169, min_340, img1_85], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2381 = torch.ops.aten.max_pool3d_with_indices.default(buf2380, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2382 = buf2381[0]
del buf2381
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1020, max_pool3d_596], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2385 = torch.ops.aten.max_pool3d_with_indices.default(buf2384, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2386 = buf2385[0]
del buf2385
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1022, max_pool3d_597], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2389 = torch.ops.aten.max_pool3d_with_indices.default(buf2388, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2390 = buf2389[0]
del buf2389
# Topologically Sorted Source Nodes: [p1_168, p3_168, min_337, p2_168, img_83, neg_1024, max_pool3d_598], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2393 = torch.ops.aten.max_pool3d_with_indices.default(buf2392, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2394 = buf2393[0]
del buf2393
buf2396 = buf2392; del buf2392 # reuse
buf2400 = buf2388; del buf2388 # reuse
buf2404 = buf2384; del buf2384 # reuse
buf2412 = buf2380; del buf2380 # reuse
buf2416 = buf2378; del buf2378 # reuse
buf2420 = buf2374; del buf2374 # reuse
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1026, neg_1028, neg_1030, neg_1032, neg_1034, neg_1036], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2386, buf2394, buf2390, buf2396, buf2400, buf2404, buf2412, buf2416, buf2420, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1026, max_pool3d_599], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2397 = torch.ops.aten.max_pool3d_with_indices.default(buf2396, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2396
buf2398 = buf2397[0]
del buf2397
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1028, max_pool3d_600], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2401 = torch.ops.aten.max_pool3d_with_indices.default(buf2400, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2400
buf2402 = buf2401[0]
del buf2401
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1030, max_pool3d_601], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2405 = torch.ops.aten.max_pool3d_with_indices.default(buf2404, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2404
buf2406 = buf2405[0]
del buf2405
buf2408 = buf2398; del buf2398 # reuse
# Topologically Sorted Source Nodes: [p1_171, p3_171, min_343, p2_171, min_344], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2408, buf2406, buf2402, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_171, p3_171, min_343, p2_171, min_344, img1_86], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2409 = torch.ops.aten.max_pool3d_with_indices.default(buf2408, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2410 = buf2409[0]
del buf2409
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1032, max_pool3d_603], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2413 = torch.ops.aten.max_pool3d_with_indices.default(buf2412, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2414 = buf2413[0]
del buf2413
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1034, max_pool3d_604], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2417 = torch.ops.aten.max_pool3d_with_indices.default(buf2416, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2418 = buf2417[0]
del buf2417
# Topologically Sorted Source Nodes: [p1_170, p3_170, min_341, p2_170, img_84, neg_1036, max_pool3d_605], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2421 = torch.ops.aten.max_pool3d_with_indices.default(buf2420, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2422 = buf2421[0]
del buf2421
buf2424 = buf2420; del buf2420 # reuse
buf2428 = buf2416; del buf2416 # reuse
buf2432 = buf2412; del buf2412 # reuse
buf2440 = buf2408; del buf2408 # reuse
buf2444 = buf2406; del buf2406 # reuse
buf2448 = buf2402; del buf2402 # reuse
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1038, neg_1040, neg_1042, neg_1044, neg_1046, neg_1048], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2414, buf2422, buf2418, buf2424, buf2428, buf2432, buf2440, buf2444, buf2448, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1038, max_pool3d_606], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2425 = torch.ops.aten.max_pool3d_with_indices.default(buf2424, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2424
buf2426 = buf2425[0]
del buf2425
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1040, max_pool3d_607], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2429 = torch.ops.aten.max_pool3d_with_indices.default(buf2428, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2428
buf2430 = buf2429[0]
del buf2429
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1042, max_pool3d_608], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2433 = torch.ops.aten.max_pool3d_with_indices.default(buf2432, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2432
buf2434 = buf2433[0]
del buf2433
buf2436 = buf2426; del buf2426 # reuse
# Topologically Sorted Source Nodes: [p1_173, p3_173, min_347, p2_173, min_348], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2436, buf2434, buf2430, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_173, p3_173, min_347, p2_173, min_348, img1_87], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2437 = torch.ops.aten.max_pool3d_with_indices.default(buf2436, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2438 = buf2437[0]
del buf2437
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1044, max_pool3d_610], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2441 = torch.ops.aten.max_pool3d_with_indices.default(buf2440, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2442 = buf2441[0]
del buf2441
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1046, max_pool3d_611], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2445 = torch.ops.aten.max_pool3d_with_indices.default(buf2444, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2446 = buf2445[0]
del buf2445
# Topologically Sorted Source Nodes: [p1_172, p3_172, min_345, p2_172, img_85, neg_1048, max_pool3d_612], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2449 = torch.ops.aten.max_pool3d_with_indices.default(buf2448, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2450 = buf2449[0]
del buf2449
buf2452 = buf2448; del buf2448 # reuse
buf2456 = buf2444; del buf2444 # reuse
buf2460 = buf2440; del buf2440 # reuse
buf2468 = buf2436; del buf2436 # reuse
buf2472 = buf2434; del buf2434 # reuse
buf2476 = buf2430; del buf2430 # reuse
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1050, neg_1052, neg_1054, neg_1056, neg_1058, neg_1060], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2442, buf2450, buf2446, buf2452, buf2456, buf2460, buf2468, buf2472, buf2476, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1050, max_pool3d_613], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2453 = torch.ops.aten.max_pool3d_with_indices.default(buf2452, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2452
buf2454 = buf2453[0]
del buf2453
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1052, max_pool3d_614], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2457 = torch.ops.aten.max_pool3d_with_indices.default(buf2456, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2456
buf2458 = buf2457[0]
del buf2457
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1054, max_pool3d_615], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2461 = torch.ops.aten.max_pool3d_with_indices.default(buf2460, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2460
buf2462 = buf2461[0]
del buf2461
buf2464 = buf2454; del buf2454 # reuse
# Topologically Sorted Source Nodes: [p1_175, p3_175, min_351, p2_175, min_352], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2464, buf2462, buf2458, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_175, p3_175, min_351, p2_175, min_352, img1_88], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2465 = torch.ops.aten.max_pool3d_with_indices.default(buf2464, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2466 = buf2465[0]
del buf2465
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1056, max_pool3d_617], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2469 = torch.ops.aten.max_pool3d_with_indices.default(buf2468, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2470 = buf2469[0]
del buf2469
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1058, max_pool3d_618], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2473 = torch.ops.aten.max_pool3d_with_indices.default(buf2472, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2474 = buf2473[0]
del buf2473
# Topologically Sorted Source Nodes: [p1_174, p3_174, min_349, p2_174, img_86, neg_1060, max_pool3d_619], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2477 = torch.ops.aten.max_pool3d_with_indices.default(buf2476, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2478 = buf2477[0]
del buf2477
buf2480 = buf2476; del buf2476 # reuse
buf2484 = buf2472; del buf2472 # reuse
buf2488 = buf2468; del buf2468 # reuse
buf2496 = buf2464; del buf2464 # reuse
buf2500 = buf2462; del buf2462 # reuse
buf2504 = buf2458; del buf2458 # reuse
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1062, neg_1064, neg_1066, neg_1068, neg_1070, neg_1072], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2470, buf2478, buf2474, buf2480, buf2484, buf2488, buf2496, buf2500, buf2504, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1062, max_pool3d_620], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2481 = torch.ops.aten.max_pool3d_with_indices.default(buf2480, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2480
buf2482 = buf2481[0]
del buf2481
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1064, max_pool3d_621], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2485 = torch.ops.aten.max_pool3d_with_indices.default(buf2484, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2484
buf2486 = buf2485[0]
del buf2485
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1066, max_pool3d_622], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2489 = torch.ops.aten.max_pool3d_with_indices.default(buf2488, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2488
buf2490 = buf2489[0]
del buf2489
buf2492 = buf2482; del buf2482 # reuse
# Topologically Sorted Source Nodes: [p1_177, p3_177, min_355, p2_177, min_356], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2492, buf2490, buf2486, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_177, p3_177, min_355, p2_177, min_356, img1_89], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2493 = torch.ops.aten.max_pool3d_with_indices.default(buf2492, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2494 = buf2493[0]
del buf2493
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1068, max_pool3d_624], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2497 = torch.ops.aten.max_pool3d_with_indices.default(buf2496, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2498 = buf2497[0]
del buf2497
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1070, max_pool3d_625], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2501 = torch.ops.aten.max_pool3d_with_indices.default(buf2500, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2502 = buf2501[0]
del buf2501
# Topologically Sorted Source Nodes: [p1_176, p3_176, min_353, p2_176, img_87, neg_1072, max_pool3d_626], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2505 = torch.ops.aten.max_pool3d_with_indices.default(buf2504, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2506 = buf2505[0]
del buf2505
buf2508 = buf2504; del buf2504 # reuse
buf2512 = buf2500; del buf2500 # reuse
buf2516 = buf2496; del buf2496 # reuse
buf2524 = buf2492; del buf2492 # reuse
buf2528 = buf2490; del buf2490 # reuse
buf2532 = buf2486; del buf2486 # reuse
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1074, neg_1076, neg_1078, neg_1080, neg_1082, neg_1084], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2498, buf2506, buf2502, buf2508, buf2512, buf2516, buf2524, buf2528, buf2532, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1074, max_pool3d_627], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2509 = torch.ops.aten.max_pool3d_with_indices.default(buf2508, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2508
buf2510 = buf2509[0]
del buf2509
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1076, max_pool3d_628], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2513 = torch.ops.aten.max_pool3d_with_indices.default(buf2512, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2512
buf2514 = buf2513[0]
del buf2513
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1078, max_pool3d_629], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2517 = torch.ops.aten.max_pool3d_with_indices.default(buf2516, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2516
buf2518 = buf2517[0]
del buf2517
buf2520 = buf2510; del buf2510 # reuse
# Topologically Sorted Source Nodes: [p1_179, p3_179, min_359, p2_179, min_360], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2520, buf2518, buf2514, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_179, p3_179, min_359, p2_179, min_360, img1_90], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2521 = torch.ops.aten.max_pool3d_with_indices.default(buf2520, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2522 = buf2521[0]
del buf2521
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1080, max_pool3d_631], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2525 = torch.ops.aten.max_pool3d_with_indices.default(buf2524, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2526 = buf2525[0]
del buf2525
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1082, max_pool3d_632], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2529 = torch.ops.aten.max_pool3d_with_indices.default(buf2528, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2530 = buf2529[0]
del buf2529
# Topologically Sorted Source Nodes: [p1_178, p3_178, min_357, p2_178, img_88, neg_1084, max_pool3d_633], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2533 = torch.ops.aten.max_pool3d_with_indices.default(buf2532, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2534 = buf2533[0]
del buf2533
buf2536 = buf2532; del buf2532 # reuse
buf2540 = buf2528; del buf2528 # reuse
buf2544 = buf2524; del buf2524 # reuse
buf2552 = buf2520; del buf2520 # reuse
buf2556 = buf2518; del buf2518 # reuse
buf2560 = buf2514; del buf2514 # reuse
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1086, neg_1088, neg_1090, neg_1092, neg_1094, neg_1096], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2526, buf2534, buf2530, buf2536, buf2540, buf2544, buf2552, buf2556, buf2560, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1086, max_pool3d_634], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2537 = torch.ops.aten.max_pool3d_with_indices.default(buf2536, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2536
buf2538 = buf2537[0]
del buf2537
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1088, max_pool3d_635], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2541 = torch.ops.aten.max_pool3d_with_indices.default(buf2540, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2540
buf2542 = buf2541[0]
del buf2541
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1090, max_pool3d_636], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2545 = torch.ops.aten.max_pool3d_with_indices.default(buf2544, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2544
buf2546 = buf2545[0]
del buf2545
buf2548 = buf2538; del buf2538 # reuse
# Topologically Sorted Source Nodes: [p1_181, p3_181, min_363, p2_181, min_364], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2548, buf2546, buf2542, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_181, p3_181, min_363, p2_181, min_364, img1_91], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2549 = torch.ops.aten.max_pool3d_with_indices.default(buf2548, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2550 = buf2549[0]
del buf2549
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1092, max_pool3d_638], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2553 = torch.ops.aten.max_pool3d_with_indices.default(buf2552, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2554 = buf2553[0]
del buf2553
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1094, max_pool3d_639], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2557 = torch.ops.aten.max_pool3d_with_indices.default(buf2556, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2558 = buf2557[0]
del buf2557
# Topologically Sorted Source Nodes: [p1_180, p3_180, min_361, p2_180, img_89, neg_1096, max_pool3d_640], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2561 = torch.ops.aten.max_pool3d_with_indices.default(buf2560, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2562 = buf2561[0]
del buf2561
buf2564 = buf2560; del buf2560 # reuse
buf2568 = buf2556; del buf2556 # reuse
buf2572 = buf2552; del buf2552 # reuse
buf2580 = buf2548; del buf2548 # reuse
buf2584 = buf2546; del buf2546 # reuse
buf2588 = buf2542; del buf2542 # reuse
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1098, neg_1100, neg_1102, neg_1104, neg_1106, neg_1108], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2554, buf2562, buf2558, buf2564, buf2568, buf2572, buf2580, buf2584, buf2588, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1098, max_pool3d_641], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2565 = torch.ops.aten.max_pool3d_with_indices.default(buf2564, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2564
buf2566 = buf2565[0]
del buf2565
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1100, max_pool3d_642], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2569 = torch.ops.aten.max_pool3d_with_indices.default(buf2568, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2568
buf2570 = buf2569[0]
del buf2569
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1102, max_pool3d_643], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2573 = torch.ops.aten.max_pool3d_with_indices.default(buf2572, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2572
buf2574 = buf2573[0]
del buf2573
buf2576 = buf2566; del buf2566 # reuse
# Topologically Sorted Source Nodes: [p1_183, p3_183, min_367, p2_183, min_368], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2576, buf2574, buf2570, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_183, p3_183, min_367, p2_183, min_368, img1_92], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2577 = torch.ops.aten.max_pool3d_with_indices.default(buf2576, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2578 = buf2577[0]
del buf2577
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1104, max_pool3d_645], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2581 = torch.ops.aten.max_pool3d_with_indices.default(buf2580, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2582 = buf2581[0]
del buf2581
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1106, max_pool3d_646], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2585 = torch.ops.aten.max_pool3d_with_indices.default(buf2584, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2586 = buf2585[0]
del buf2585
# Topologically Sorted Source Nodes: [p1_182, p3_182, min_365, p2_182, img_90, neg_1108, max_pool3d_647], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2589 = torch.ops.aten.max_pool3d_with_indices.default(buf2588, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2590 = buf2589[0]
del buf2589
buf2592 = buf2588; del buf2588 # reuse
buf2596 = buf2584; del buf2584 # reuse
buf2600 = buf2580; del buf2580 # reuse
buf2608 = buf2576; del buf2576 # reuse
buf2612 = buf2574; del buf2574 # reuse
buf2616 = buf2570; del buf2570 # reuse
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1110, neg_1112, neg_1114, neg_1116, neg_1118, neg_1120], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2582, buf2590, buf2586, buf2592, buf2596, buf2600, buf2608, buf2612, buf2616, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1110, max_pool3d_648], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2593 = torch.ops.aten.max_pool3d_with_indices.default(buf2592, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2592
buf2594 = buf2593[0]
del buf2593
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1112, max_pool3d_649], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2597 = torch.ops.aten.max_pool3d_with_indices.default(buf2596, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2596
buf2598 = buf2597[0]
del buf2597
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1114, max_pool3d_650], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2601 = torch.ops.aten.max_pool3d_with_indices.default(buf2600, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2600
buf2602 = buf2601[0]
del buf2601
buf2604 = buf2594; del buf2594 # reuse
# Topologically Sorted Source Nodes: [p1_185, p3_185, min_371, p2_185, min_372], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2604, buf2602, buf2598, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_185, p3_185, min_371, p2_185, min_372, img1_93], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2605 = torch.ops.aten.max_pool3d_with_indices.default(buf2604, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2606 = buf2605[0]
del buf2605
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1116, max_pool3d_652], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2609 = torch.ops.aten.max_pool3d_with_indices.default(buf2608, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2610 = buf2609[0]
del buf2609
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1118, max_pool3d_653], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2613 = torch.ops.aten.max_pool3d_with_indices.default(buf2612, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2614 = buf2613[0]
del buf2613
# Topologically Sorted Source Nodes: [p1_184, p3_184, min_369, p2_184, img_91, neg_1120, max_pool3d_654], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2617 = torch.ops.aten.max_pool3d_with_indices.default(buf2616, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2618 = buf2617[0]
del buf2617
buf2620 = buf2616; del buf2616 # reuse
buf2624 = buf2612; del buf2612 # reuse
buf2628 = buf2608; del buf2608 # reuse
buf2636 = buf2604; del buf2604 # reuse
buf2640 = buf2602; del buf2602 # reuse
buf2644 = buf2598; del buf2598 # reuse
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1122, neg_1124, neg_1126, neg_1128, neg_1130, neg_1132], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2610, buf2618, buf2614, buf2620, buf2624, buf2628, buf2636, buf2640, buf2644, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1122, max_pool3d_655], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2621 = torch.ops.aten.max_pool3d_with_indices.default(buf2620, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2620
buf2622 = buf2621[0]
del buf2621
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1124, max_pool3d_656], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2625 = torch.ops.aten.max_pool3d_with_indices.default(buf2624, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2624
buf2626 = buf2625[0]
del buf2625
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1126, max_pool3d_657], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2629 = torch.ops.aten.max_pool3d_with_indices.default(buf2628, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2628
buf2630 = buf2629[0]
del buf2629
buf2632 = buf2622; del buf2622 # reuse
# Topologically Sorted Source Nodes: [p1_187, p3_187, min_375, p2_187, min_376], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2632, buf2630, buf2626, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_187, p3_187, min_375, p2_187, min_376, img1_94], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2633 = torch.ops.aten.max_pool3d_with_indices.default(buf2632, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2634 = buf2633[0]
del buf2633
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1128, max_pool3d_659], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2637 = torch.ops.aten.max_pool3d_with_indices.default(buf2636, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2638 = buf2637[0]
del buf2637
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1130, max_pool3d_660], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2641 = torch.ops.aten.max_pool3d_with_indices.default(buf2640, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2642 = buf2641[0]
del buf2641
# Topologically Sorted Source Nodes: [p1_186, p3_186, min_373, p2_186, img_92, neg_1132, max_pool3d_661], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2645 = torch.ops.aten.max_pool3d_with_indices.default(buf2644, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2646 = buf2645[0]
del buf2645
buf2648 = buf2644; del buf2644 # reuse
buf2652 = buf2640; del buf2640 # reuse
buf2656 = buf2636; del buf2636 # reuse
buf2664 = buf2632; del buf2632 # reuse
buf2668 = buf2630; del buf2630 # reuse
buf2672 = buf2626; del buf2626 # reuse
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1134, neg_1136, neg_1138, neg_1140, neg_1142, neg_1144], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2638, buf2646, buf2642, buf2648, buf2652, buf2656, buf2664, buf2668, buf2672, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1134, max_pool3d_662], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2649 = torch.ops.aten.max_pool3d_with_indices.default(buf2648, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2648
buf2650 = buf2649[0]
del buf2649
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1136, max_pool3d_663], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2653 = torch.ops.aten.max_pool3d_with_indices.default(buf2652, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2652
buf2654 = buf2653[0]
del buf2653
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1138, max_pool3d_664], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2657 = torch.ops.aten.max_pool3d_with_indices.default(buf2656, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2656
buf2658 = buf2657[0]
del buf2657
buf2660 = buf2650; del buf2650 # reuse
# Topologically Sorted Source Nodes: [p1_189, p3_189, min_379, p2_189, min_380], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2660, buf2658, buf2654, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_189, p3_189, min_379, p2_189, min_380, img1_95], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2661 = torch.ops.aten.max_pool3d_with_indices.default(buf2660, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2662 = buf2661[0]
del buf2661
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1140, max_pool3d_666], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2665 = torch.ops.aten.max_pool3d_with_indices.default(buf2664, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2666 = buf2665[0]
del buf2665
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1142, max_pool3d_667], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2669 = torch.ops.aten.max_pool3d_with_indices.default(buf2668, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2670 = buf2669[0]
del buf2669
# Topologically Sorted Source Nodes: [p1_188, p3_188, min_377, p2_188, img_93, neg_1144, max_pool3d_668], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2673 = torch.ops.aten.max_pool3d_with_indices.default(buf2672, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2674 = buf2673[0]
del buf2673
buf2676 = buf2672; del buf2672 # reuse
buf2680 = buf2668; del buf2668 # reuse
buf2684 = buf2664; del buf2664 # reuse
buf2692 = buf2660; del buf2660 # reuse
buf2696 = buf2658; del buf2658 # reuse
buf2700 = buf2654; del buf2654 # reuse
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1146, neg_1148, neg_1150, neg_1152, neg_1154, neg_1156], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2666, buf2674, buf2670, buf2676, buf2680, buf2684, buf2692, buf2696, buf2700, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1146, max_pool3d_669], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2677 = torch.ops.aten.max_pool3d_with_indices.default(buf2676, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2676
buf2678 = buf2677[0]
del buf2677
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1148, max_pool3d_670], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2681 = torch.ops.aten.max_pool3d_with_indices.default(buf2680, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2680
buf2682 = buf2681[0]
del buf2681
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1150, max_pool3d_671], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2685 = torch.ops.aten.max_pool3d_with_indices.default(buf2684, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2684
buf2686 = buf2685[0]
del buf2685
buf2688 = buf2678; del buf2678 # reuse
# Topologically Sorted Source Nodes: [p1_191, p3_191, min_383, p2_191, min_384], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2688, buf2686, buf2682, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_191, p3_191, min_383, p2_191, min_384, img1_96], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2689 = torch.ops.aten.max_pool3d_with_indices.default(buf2688, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2690 = buf2689[0]
del buf2689
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1152, max_pool3d_673], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2693 = torch.ops.aten.max_pool3d_with_indices.default(buf2692, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2694 = buf2693[0]
del buf2693
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1154, max_pool3d_674], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2697 = torch.ops.aten.max_pool3d_with_indices.default(buf2696, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2698 = buf2697[0]
del buf2697
# Topologically Sorted Source Nodes: [p1_190, p3_190, min_381, p2_190, img_94, neg_1156, max_pool3d_675], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2701 = torch.ops.aten.max_pool3d_with_indices.default(buf2700, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2702 = buf2701[0]
del buf2701
buf2704 = buf2700; del buf2700 # reuse
buf2708 = buf2696; del buf2696 # reuse
buf2712 = buf2692; del buf2692 # reuse
buf2720 = buf2688; del buf2688 # reuse
buf2724 = buf2686; del buf2686 # reuse
buf2728 = buf2682; del buf2682 # reuse
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1158, neg_1160, neg_1162, neg_1164, neg_1166, neg_1168], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2694, buf2702, buf2698, buf2704, buf2708, buf2712, buf2720, buf2724, buf2728, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1158, max_pool3d_676], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2705 = torch.ops.aten.max_pool3d_with_indices.default(buf2704, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2704
buf2706 = buf2705[0]
del buf2705
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1160, max_pool3d_677], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2709 = torch.ops.aten.max_pool3d_with_indices.default(buf2708, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2708
buf2710 = buf2709[0]
del buf2709
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1162, max_pool3d_678], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2713 = torch.ops.aten.max_pool3d_with_indices.default(buf2712, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2712
buf2714 = buf2713[0]
del buf2713
buf2716 = buf2706; del buf2706 # reuse
# Topologically Sorted Source Nodes: [p1_193, p3_193, min_387, p2_193, min_388], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2716, buf2714, buf2710, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_193, p3_193, min_387, p2_193, min_388, img1_97], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2717 = torch.ops.aten.max_pool3d_with_indices.default(buf2716, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2718 = buf2717[0]
del buf2717
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1164, max_pool3d_680], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2721 = torch.ops.aten.max_pool3d_with_indices.default(buf2720, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2722 = buf2721[0]
del buf2721
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1166, max_pool3d_681], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2725 = torch.ops.aten.max_pool3d_with_indices.default(buf2724, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2726 = buf2725[0]
del buf2725
# Topologically Sorted Source Nodes: [p1_192, p3_192, min_385, p2_192, img_95, neg_1168, max_pool3d_682], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2729 = torch.ops.aten.max_pool3d_with_indices.default(buf2728, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2730 = buf2729[0]
del buf2729
buf2732 = buf2728; del buf2728 # reuse
buf2736 = buf2724; del buf2724 # reuse
buf2740 = buf2720; del buf2720 # reuse
buf2748 = buf2716; del buf2716 # reuse
buf2752 = buf2714; del buf2714 # reuse
buf2756 = buf2710; del buf2710 # reuse
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1170, neg_1172, neg_1174, neg_1176, neg_1178, neg_1180], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2722, buf2730, buf2726, buf2732, buf2736, buf2740, buf2748, buf2752, buf2756, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1170, max_pool3d_683], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2733 = torch.ops.aten.max_pool3d_with_indices.default(buf2732, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2732
buf2734 = buf2733[0]
del buf2733
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1172, max_pool3d_684], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2737 = torch.ops.aten.max_pool3d_with_indices.default(buf2736, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2736
buf2738 = buf2737[0]
del buf2737
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1174, max_pool3d_685], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2741 = torch.ops.aten.max_pool3d_with_indices.default(buf2740, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2740
buf2742 = buf2741[0]
del buf2741
buf2744 = buf2734; del buf2734 # reuse
# Topologically Sorted Source Nodes: [p1_195, p3_195, min_391, p2_195, min_392], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2744, buf2742, buf2738, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_195, p3_195, min_391, p2_195, min_392, img1_98], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2745 = torch.ops.aten.max_pool3d_with_indices.default(buf2744, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2746 = buf2745[0]
del buf2745
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1176, max_pool3d_687], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2749 = torch.ops.aten.max_pool3d_with_indices.default(buf2748, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2750 = buf2749[0]
del buf2749
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1178, max_pool3d_688], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2753 = torch.ops.aten.max_pool3d_with_indices.default(buf2752, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2754 = buf2753[0]
del buf2753
# Topologically Sorted Source Nodes: [p1_194, p3_194, min_389, p2_194, img_96, neg_1180, max_pool3d_689], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2757 = torch.ops.aten.max_pool3d_with_indices.default(buf2756, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2758 = buf2757[0]
del buf2757
buf2760 = buf2756; del buf2756 # reuse
buf2764 = buf2752; del buf2752 # reuse
buf2768 = buf2748; del buf2748 # reuse
buf2776 = buf2744; del buf2744 # reuse
buf2780 = buf2742; del buf2742 # reuse
buf2784 = buf2738; del buf2738 # reuse
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1182, neg_1184, neg_1186, neg_1188, neg_1190, neg_1192], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2750, buf2758, buf2754, buf2760, buf2764, buf2768, buf2776, buf2780, buf2784, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1182, max_pool3d_690], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2761 = torch.ops.aten.max_pool3d_with_indices.default(buf2760, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2760
buf2762 = buf2761[0]
del buf2761
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1184, max_pool3d_691], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2765 = torch.ops.aten.max_pool3d_with_indices.default(buf2764, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2764
buf2766 = buf2765[0]
del buf2765
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1186, max_pool3d_692], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2769 = torch.ops.aten.max_pool3d_with_indices.default(buf2768, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2768
buf2770 = buf2769[0]
del buf2769
buf2772 = buf2762; del buf2762 # reuse
# Topologically Sorted Source Nodes: [p1_197, p3_197, min_395, p2_197, min_396], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2772, buf2770, buf2766, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_197, p3_197, min_395, p2_197, min_396, img1_99], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2773 = torch.ops.aten.max_pool3d_with_indices.default(buf2772, [3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2774 = buf2773[0]
del buf2773
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1188, max_pool3d_694], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2777 = torch.ops.aten.max_pool3d_with_indices.default(buf2776, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2778 = buf2777[0]
del buf2777
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1190, max_pool3d_695], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2781 = torch.ops.aten.max_pool3d_with_indices.default(buf2780, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2782 = buf2781[0]
del buf2781
# Topologically Sorted Source Nodes: [p1_196, p3_196, min_393, p2_196, img_97, neg_1192, max_pool3d_696], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2785 = torch.ops.aten.max_pool3d_with_indices.default(buf2784, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2786 = buf2785[0]
del buf2785
buf2788 = buf2784; del buf2784 # reuse
buf2792 = buf2780; del buf2780 # reuse
buf2796 = buf2776; del buf2776 # reuse
buf2804 = buf2772; del buf2772 # reuse
buf2808 = buf2770; del buf2770 # reuse
buf2812 = buf2766; del buf2766 # reuse
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1194, neg_1196, neg_1198, neg_1200, neg_1202, neg_1204], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_2.run(buf2778, buf2786, buf2782, buf2788, buf2792, buf2796, buf2804, buf2808, buf2812, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1194, max_pool3d_697], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2789 = torch.ops.aten.max_pool3d_with_indices.default(buf2788, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2788
buf2790 = buf2789[0]
del buf2789
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1196, max_pool3d_698], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2793 = torch.ops.aten.max_pool3d_with_indices.default(buf2792, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2792
buf2794 = buf2793[0]
del buf2793
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1198, max_pool3d_699], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2797 = torch.ops.aten.max_pool3d_with_indices.default(buf2796, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2796
buf2798 = buf2797[0]
del buf2797
buf2800 = buf2790; del buf2790 # reuse
# Topologically Sorted Source Nodes: [p1_199, p3_199, min_399, p2_199, min_400], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2800, buf2798, buf2794, 256, grid=grid(256), stream=stream0)
del buf2794
del buf2798
# Topologically Sorted Source Nodes: [p1_199, p3_199, min_399, p2_199, min_400, img1_100], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2801 = torch.ops.aten.max_pool3d_with_indices.default(buf2800, [3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2800
buf2802 = buf2801[0]
del buf2801
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1200, max_pool3d_701], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2805 = torch.ops.aten.max_pool3d_with_indices.default(buf2804, [3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2806 = buf2805[0]
del buf2805
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1202, max_pool3d_702], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2809 = torch.ops.aten.max_pool3d_with_indices.default(buf2808, [1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2810 = buf2809[0]
del buf2809
# Topologically Sorted Source Nodes: [p1_198, p3_198, min_397, p2_198, img_98, neg_1204, max_pool3d_703], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2813 = torch.ops.aten.max_pool3d_with_indices.default(buf2812, [1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2814 = buf2813[0]
del buf2813
buf2816 = buf2812; del buf2812 # reuse
buf2820 = buf2808; del buf2808 # reuse
buf2824 = buf2804; del buf2804 # reuse
# Topologically Sorted Source Nodes: [p1_200, p3_200, min_401, p2_200, img_99, neg_1206, neg_1208, neg_1210], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_4.run(buf2806, buf2814, buf2810, buf2816, buf2820, buf2824, 256, grid=grid(256), stream=stream0)
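        # Here the 6-output fused kernel (triton_poi_fused_minimum_neg_4) replaces
        # the 9-output variant used in earlier iterations, presumably because this
        # is the final erosion step and fewer downstream copies of the eroded
        # image are needed.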
# Topologically Sorted Source Nodes: [p1_200, p3_200, min_401, p2_200, img_99, neg_1206, max_pool3d_704], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2817 = torch.ops.aten.max_pool3d_with_indices.default(buf2816, [3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2816
buf2818 = buf2817[0]
del buf2817
# Topologically Sorted Source Nodes: [p1_200, p3_200, min_401, p2_200, img_99, neg_1208, max_pool3d_705], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2821 = torch.ops.aten.max_pool3d_with_indices.default(buf2820, [1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2820
buf2822 = buf2821[0]
del buf2821
# Topologically Sorted Source Nodes: [p1_200, p3_200, min_401, p2_200, img_99, neg_1210, max_pool3d_706], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2825 = torch.ops.aten.max_pool3d_with_indices.default(buf2824, [1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2824
buf2826 = buf2825[0]
del buf2825
buf2828 = buf2818; del buf2818 # reuse
# Topologically Sorted Source Nodes: [p1_201, p3_201, min_403, p2_201, min_404], Original ATen: [aten.neg, aten.minimum]
triton_poi_fused_minimum_neg_3.run(buf2828, buf2826, buf2822, 256, grid=grid(256), stream=stream0)
del buf2822
del buf2826
# Topologically Sorted Source Nodes: [p1_201, p3_201, min_403, p2_201, min_404, img1_101], Original ATen: [aten.neg, aten.minimum, aten.max_pool3d_with_indices]
buf2829 = torch.ops.aten.max_pool3d_with_indices.default(buf2828, [3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2828
buf2830 = buf2829[0]
del buf2829
buf2832 = buf1430; del buf1430 # reuse
buf2833 = buf1462; del buf1462 # reuse
buf2834 = buf1490; del buf1490 # reuse
buf2835 = buf1518; del buf1518 # reuse
buf2836 = buf1546; del buf1546 # reuse
buf2837 = buf1574; del buf1574 # reuse
buf2838 = buf1602; del buf1602 # reuse
buf2839 = buf1630; del buf1630 # reuse
buf2840 = buf1658; del buf1658 # reuse
buf2841 = buf1686; del buf1686 # reuse
buf2842 = buf1714; del buf1714 # reuse
buf2843 = buf1742; del buf1742 # reuse
buf2844 = buf1770; del buf1770 # reuse
buf2845 = buf1798; del buf1798 # reuse
buf2846 = buf1826; del buf1826 # reuse
buf2847 = buf1854; del buf1854 # reuse
buf2848 = buf1882; del buf1882 # reuse
buf2849 = buf1910; del buf1910 # reuse
buf2850 = buf1938; del buf1938 # reuse
buf2851 = buf1966; del buf1966 # reuse
buf2852 = buf1994; del buf1994 # reuse
buf2853 = buf2022; del buf2022 # reuse
buf2854 = buf2050; del buf2050 # reuse
buf2855 = buf2078; del buf2078 # reuse
buf2856 = buf2106; del buf2106 # reuse
buf2857 = buf2134; del buf2134 # reuse
buf2858 = buf2162; del buf2162 # reuse
buf2859 = buf2190; del buf2190 # reuse
buf2860 = buf2218; del buf2218 # reuse
buf2861 = buf2246; del buf2246 # reuse
buf2862 = buf2274; del buf2274 # reuse
buf2863 = buf2302; del buf2302 # reuse
buf2864 = buf2330; del buf2330 # reuse
buf2865 = buf2358; del buf2358 # reuse
buf2866 = buf2386; del buf2386 # reuse
buf2867 = buf2414; del buf2414 # reuse
buf2868 = buf2442; del buf2442 # reuse
buf2869 = buf2470; del buf2470 # reuse
buf2870 = buf2498; del buf2498 # reuse
buf2871 = buf2526; del buf2526 # reuse
buf2872 = buf2554; del buf2554 # reuse
buf2873 = buf2582; del buf2582 # reuse
buf2874 = buf2610; del buf2610 # reuse
buf2875 = buf2638; del buf2638 # reuse
buf2876 = buf2666; del buf2666 # reuse
buf2877 = buf2694; del buf2694 # reuse
buf2878 = buf2722; del buf2722 # reuse
buf2879 = buf2750; del buf2750 # reuse
buf2880 = buf2778; del buf2778 # reuse
buf2881 = buf2806; del buf2806 # reuse
# Topologically Sorted Source Nodes: [p1_102, p3_102, min_205, p2_102, img_50, p1_104, p3_104, min_209, p2_104, img_51, p1_106, p3_106, min_213, p2_106, img_52, p1_108, p3_108, min_217, p2_108, img_53, p1_110, p3_110, min_221, p2_110, img_54, p1_112, p3_112, min_225, p2_112, img_55, p1_114, p3_114, min_229, p2_114, img_56, p1_116, p3_116, min_233, p2_116, img_57, p1_118, p3_118, min_237, p2_118, img_58, p1_120, p3_120, min_241, p2_120, img_59, p1_122, p3_122, min_245, p2_122, img_60, p1_124, p3_124, min_249, p2_124, img_61, p1_126, p3_126, min_253, p2_126, img_62, p1_128, p3_128, min_257, p2_128, img_63, p1_130, p3_130, min_261, p2_130, img_64, p1_132, p3_132, min_265, p2_132, img_65, p1_134, p3_134, min_269, p2_134, img_66, p1_136, p3_136, min_273, p2_136, img_67, p1_138, p3_138, min_277, p2_138, img_68, p1_140, p3_140, min_281, p2_140, img_69, p1_142, p3_142, min_285, p2_142, img_70, p1_144, p3_144, min_289, p2_144, img_71, p1_146, p3_146, min_293, p2_146, img_72, p1_148, p3_148, min_297, p2_148, img_73, p1_150, p3_150, min_301, p2_150, img_74, p1_152, p3_152, min_305, p2_152, img_75, p1_154, p3_154, min_309, p2_154, img_76, p1_156, p3_156, min_313, p2_156, img_77, p1_158, p3_158, min_317, p2_158, img_78, p1_160, p3_160, min_321, p2_160, img_79, p1_162, p3_162, min_325, p2_162, img_80, p1_164, p3_164, min_329, p2_164, img_81, p1_166, p3_166, min_333, p2_166, img_82, p1_168, p3_168, min_337, p2_168, img_83, p1_170, p3_170, min_341, p2_170, img_84, p1_172, p3_172, min_345, p2_172, img_85, p1_174, p3_174, min_349, p2_174, img_86, p1_176, p3_176, min_353, p2_176, img_87, p1_178, p3_178, min_357, p2_178, img_88, p1_180, p3_180, min_361, p2_180, img_89, p1_182, p3_182, min_365, p2_182, img_90, p1_184, p3_184, min_369, p2_184, img_91, p1_186, p3_186, min_373, p2_186, img_92, p1_188, p3_188, min_377, p2_188, img_93, p1_190, p3_190, min_381, p2_190, img_94, p1_192, p3_192, min_385, p2_192, img_95, p1_194, p3_194, min_389, p2_194, img_96, p1_196, p3_196, min_393, p2_196, img_97, p1_198, p3_198, min_397, p2_198, img_98, p1_200, p3_200, min_401, p2_200, img_99, sub_101, skel_51, sub_102, delta_50, mul_50, sub_103, relu_103, skel_52, sub_104, delta_51, mul_51, sub_105, relu_105, skel_53, sub_106, delta_52, mul_52, sub_107, relu_107, skel_54, sub_108, delta_53, mul_53, sub_109, relu_109, skel_55, sub_110, delta_54, mul_54, sub_111, relu_111, skel_56, sub_112, delta_55, mul_55, sub_113, relu_113, skel_57, sub_114, delta_56, mul_56, sub_115, relu_115, skel_58, sub_116, delta_57, mul_57, sub_117, relu_117, skel_59, sub_118, delta_58, mul_58, sub_119, relu_119, skel_60, sub_120, delta_59, mul_59, sub_121, relu_121, skel_61, sub_122, delta_60, mul_60, sub_123, relu_123, skel_62, sub_124, delta_61, mul_61, sub_125, relu_125, skel_63, sub_126, delta_62, mul_62, sub_127, relu_127, skel_64, sub_128, delta_63, mul_63, sub_129, relu_129, skel_65, sub_130, delta_64, mul_64, sub_131, relu_131, skel_66, sub_132, delta_65, mul_65, sub_133, relu_133, skel_67, sub_134, delta_66, mul_66, sub_135, relu_135, skel_68, sub_136, delta_67, mul_67, sub_137, relu_137, skel_69, sub_138, delta_68, mul_68, sub_139, relu_139, skel_70, sub_140, delta_69, mul_69, sub_141, relu_141, skel_71, sub_142, delta_70, mul_70, sub_143, relu_143, skel_72, sub_144, delta_71, mul_71, sub_145, relu_145, skel_73, sub_146, delta_72, mul_72, sub_147, relu_147, skel_74, sub_148, delta_73, mul_73, sub_149, relu_149, skel_75, sub_150, delta_74, mul_74, sub_151, relu_151, skel_76, sub_152, delta_75, mul_75, sub_153, relu_153, skel_77, sub_154, delta_76, 
    # mul_76, sub_155, relu_155, skel_78, sub_156, delta_77, mul_77, sub_157, relu_157, skel_79, sub_158, delta_78, mul_78, sub_159, relu_159, skel_80, sub_160, delta_79, mul_79, sub_161, relu_161, skel_81, sub_162, delta_80, mul_80, sub_163, relu_163, skel_82, sub_164, delta_81, mul_81, sub_165, relu_165, skel_83, sub_166, delta_82, mul_82, sub_167, relu_167, skel_84, sub_168, delta_83, mul_83, sub_169, relu_169, skel_85, sub_170, delta_84, mul_84, sub_171, relu_171, skel_86, sub_172, delta_85, mul_85, sub_173, relu_173, skel_87, sub_174, delta_86, mul_86, sub_175, relu_175, skel_88, sub_176, delta_87, mul_87, sub_177, relu_177, skel_89, sub_178, delta_88, mul_88, sub_179, relu_179, skel_90, sub_180, delta_89, mul_89, sub_181, relu_181, skel_91, sub_182, delta_90, mul_90, sub_183, relu_183, skel_92, sub_184, delta_91, mul_91, sub_185, relu_185, skel_93, sub_186, delta_92, mul_92, sub_187, relu_187, skel_94, sub_188, delta_93, mul_93, sub_189, relu_189, skel_95, sub_190, delta_94, mul_94, sub_191, relu_191, skel_96, sub_192, delta_95, mul_95, sub_193, relu_193, skel_97, sub_194, delta_96, mul_96, sub_195, relu_195, skel_98, sub_196, delta_97, mul_97, sub_197, relu_197, skel_99, sub_198, delta_98, mul_98, sub_199, relu_199, skel_100, sub_200, delta_99, mul_99, sub_201, relu_201, skel_101], Original ATen: [aten.neg, aten.minimum, aten.sub, aten.relu, aten.mul, aten.add]
triton_poi_fused_add_minimum_mul_neg_relu_sub_5.run(buf2832, buf2833, buf2834, buf2835, buf2836, buf2837, buf2838, buf2839, buf2840, buf2841, buf2842, buf2843, buf2844, buf2845, buf2846, buf2847, buf2848, buf2849, buf2850, buf2851, buf2852, buf2853, buf2854, buf2855, buf2856, buf2857, buf2858, buf2859, buf2860, buf2861, buf2862, buf2863, buf2864, buf2865, buf2866, buf2867, buf2868, buf2869, buf2870, buf2871, buf2872, buf2873, buf2874, buf2875, buf2876, buf2877, buf2878, buf2879, buf2880, buf2881, arg1_1, buf1434, buf1442, buf1438, buf1458, buf1470, buf1466, buf1486, buf1498, buf1494, buf1514, buf1526, buf1522, buf1542, buf1554, buf1550, buf1570, buf1582, buf1578, buf1598, buf1610, buf1606, buf1626, buf1638, buf1634, buf1654, buf1666, buf1662, buf1682, buf1694, buf1690, buf1710, buf1722, buf1718, buf1738, buf1750, buf1746, buf1766, buf1778, buf1774, buf1794, buf1806, buf1802, buf1822, buf1834, buf1830, buf1850, buf1862, buf1858, buf1878, buf1890, buf1886, buf1906, buf1918, buf1914, buf1934, buf1946, buf1942, buf1962, buf1974, buf1970, buf1990, buf2002, buf1998, buf2018, buf2030, buf2026, buf2046, buf2058, buf2054, buf2074, buf2086, buf2082, buf2102, buf2114, buf2110, buf2130, buf2142, buf2138, buf2158, buf2170, buf2166, buf2186, buf2198, buf2194, buf2214, buf2226, buf2222, buf2242, buf2254, buf2250, buf2270, buf2282, buf2278, buf2298, buf2310, buf2306, buf2326, buf2338, buf2334, buf2354, buf2366, buf2362, buf2382, buf2394, buf2390, buf2410, buf2422, buf2418, buf2438, buf2450, buf2446, buf2466, buf2478, buf2474, buf2494, buf2506, buf2502, buf2522, buf2534, buf2530, buf2550, buf2562, buf2558, buf2578, buf2590, buf2586, buf2606, buf2618, buf2614, buf2634, buf2646, buf2642, buf2662, buf2674, buf2670, buf2690, buf2702, buf2698, buf2718, buf2730, buf2726, buf2746, buf2758, buf2754, buf2774, buf2786, buf2782, buf2802, buf2814, buf2810, buf2830, 256, grid=grid(256), stream=stream0)
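        # This single fused kernel appears to fold the last 50 skeleton updates
        # (skel = skel + relu(delta - skel * delta), with delta = relu(img - img1))
        # over the erosion/opening buffers computed above into one launch.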
del arg1_1
del buf1434
del buf1438
del buf1442
del buf1458
del buf1466
del buf1470
del buf1486
del buf1494
del buf1498
del buf1514
del buf1522
del buf1526
del buf1542
del buf1550
del buf1554
del buf1570
del buf1578
del buf1582
del buf1598
del buf1606
del buf1610
del buf1626
del buf1634
del buf1638
del buf1654
del buf1662
del buf1666
del buf1682
del buf1690
del buf1694
del buf1710
del buf1718
del buf1722
del buf1738
del buf1746
del buf1750
del buf1766
del buf1774
del buf1778
del buf1794
del buf1802
del buf1806
del buf1822
del buf1830
del buf1834
del buf1850
del buf1858
del buf1862
del buf1878
del buf1886
del buf1890
del buf1906
del buf1914
del buf1918
del buf1934
del buf1942
del buf1946
del buf1962
del buf1970
del buf1974
del buf1990
del buf1998
del buf2002
del buf2018
del buf2026
del buf2030
del buf2046
del buf2054
del buf2058
del buf2074
del buf2082
del buf2086
del buf2102
del buf2110
del buf2114
del buf2130
del buf2138
del buf2142
del buf2158
del buf2166
del buf2170
del buf2186
del buf2194
del buf2198
del buf2214
del buf2222
del buf2226
del buf2242
del buf2250
del buf2254
del buf2270
del buf2278
del buf2282
del buf2298
del buf2306
del buf2310
del buf2326
del buf2334
del buf2338
del buf2354
del buf2362
del buf2366
del buf2382
del buf2390
del buf2394
del buf2410
del buf2418
del buf2422
del buf2438
del buf2446
del buf2450
del buf2466
del buf2474
del buf2478
del buf2494
del buf2502
del buf2506
del buf2522
del buf2530
del buf2534
del buf2550
del buf2558
del buf2562
del buf2578
del buf2586
del buf2590
del buf2606
del buf2614
del buf2618
del buf2634
del buf2642
del buf2646
del buf2662
del buf2670
del buf2674
del buf2690
del buf2698
del buf2702
del buf2718
del buf2726
del buf2730
del buf2746
del buf2754
del buf2758
del buf2774
del buf2782
del buf2786
del buf2802
del buf2810
del buf2814
del buf2830
del buf2832
del buf2833
del buf2834
del buf2835
del buf2836
del buf2837
del buf2838
del buf2839
del buf2840
del buf2841
del buf2842
del buf2843
del buf2844
del buf2845
del buf2846
del buf2847
del buf2848
del buf2849
del buf2850
del buf2851
del buf2852
del buf2853
del buf2854
del buf2855
del buf2856
del buf2857
del buf2858
del buf2859
del buf2860
del buf2861
del buf2862
del buf2863
del buf2864
del buf2865
del buf2866
del buf2867
del buf2868
del buf2869
del buf2870
del buf2871
del buf2872
del buf2873
del buf2874
del buf2875
del buf2876
del buf2877
del buf2878
del buf2879
del buf2880
buf2882 = buf14; del buf14 # reuse
buf2883 = buf2882; del buf2882 # reuse
buf2884 = buf2883; del buf2883 # reuse
buf2885 = buf102; del buf102 # reuse
buf2886 = buf130; del buf130 # reuse
buf2887 = buf158; del buf158 # reuse
buf2888 = buf186; del buf186 # reuse
buf2889 = buf214; del buf214 # reuse
buf2890 = buf242; del buf242 # reuse
buf2891 = buf270; del buf270 # reuse
buf2892 = buf2891; del buf2891 # reuse
buf2893 = buf2892; del buf2892 # reuse
buf2894 = buf2893; del buf2893 # reuse
buf2895 = buf2894; del buf2894 # reuse
buf2896 = buf2895; del buf2895 # reuse
buf2897 = buf2896; del buf2896 # reuse
buf2898 = buf2897; del buf2897 # reuse
buf2899 = buf2898; del buf2898 # reuse
buf2900 = buf2899; del buf2899 # reuse
buf2901 = buf2900; del buf2900 # reuse
buf2902 = buf2901; del buf2901 # reuse
buf2903 = buf2902; del buf2902 # reuse
buf2904 = buf2903; del buf2903 # reuse
buf2905 = buf2904; del buf2904 # reuse
buf2906 = buf2905; del buf2905 # reuse
buf2907 = buf2906; del buf2906 # reuse
buf2908 = buf2907; del buf2907 # reuse
buf2909 = buf2908; del buf2908 # reuse
buf2910 = buf2909; del buf2909 # reuse
buf2911 = buf2910; del buf2910 # reuse
buf2912 = buf2911; del buf2911 # reuse
buf2913 = buf2912; del buf2912 # reuse
buf2914 = buf2913; del buf2913 # reuse
buf2915 = buf2914; del buf2914 # reuse
buf2916 = buf2915; del buf2915 # reuse
buf2917 = buf1002; del buf1002 # reuse
buf2918 = buf1026; del buf1026 # reuse
buf2919 = buf1054; del buf1054 # reuse
buf2920 = buf1082; del buf1082 # reuse
buf2921 = buf1110; del buf1110 # reuse
buf2922 = buf1138; del buf1138 # reuse
buf2923 = buf1166; del buf1166 # reuse
buf2924 = buf1194; del buf1194 # reuse
buf2925 = buf1222; del buf1222 # reuse
buf2926 = buf1250; del buf1250 # reuse
buf2927 = buf1278; del buf1278 # reuse
buf2928 = buf1306; del buf1306 # reuse
buf2929 = buf1334; del buf1334 # reuse
buf2930 = buf1362; del buf1362 # reuse
buf2931 = buf1390; del buf1390 # reuse
# Topologically Sorted Source Nodes: [p1_1, p3_1, min_3, p2_1, img, p1_3, p3_3, min_7, p2_3, img_1, p1_5, p3_5, min_11, p2_5, img_2, p1_7, p3_7, min_15, p2_7, img_3, p1_9, p3_9, min_19, p2_9, img_4, p1_11, p3_11, min_23, p2_11, img_5, p1_13, p3_13, min_27, p2_13, img_6, p1_15, p3_15, min_31, p2_15, img_7, p1_17, p3_17, min_35, p2_17, img_8, p1_19, p3_19, min_39, p2_19, img_9, p1_21, p3_21, min_43, p2_21, img_10, p1_23, p3_23, min_47, p2_23, img_11, p1_25, p3_25, min_51, p2_25, img_12, p1_27, p3_27, min_55, p2_27, img_13, p1_29, p3_29, min_59, p2_29, img_14, p1_31, p3_31, min_63, p2_31, img_15, p1_33, p3_33, min_67, p2_33, img_16, p1_35, p3_35, min_71, p2_35, img_17, p1_37, p3_37, min_75, p2_37, img_18, p1_39, p3_39, min_79, p2_39, img_19, p1_41, p3_41, min_83, p2_41, img_20, p1_43, p3_43, min_87, p2_43, img_21, p1_45, p3_45, min_91, p2_45, img_22, p1_47, p3_47, min_95, p2_47, img_23, p1_49, p3_49, min_99, p2_49, img_24, p1_51, p3_51, min_103, p2_51, img_25, p1_53, p3_53, min_107, p2_53, img_26, p1_55, p3_55, min_111, p2_55, img_27, p1_57, p3_57, min_115, p2_57, img_28, p1_59, p3_59, min_119, p2_59, img_29, p1_61, p3_61, min_123, p2_61, img_30, p1_63, p3_63, min_127, p2_63, img_31, p1_65, p3_65, min_131, p2_65, img_32, p1_67, p3_67, min_135, p2_67, img_33, p1_69, p3_69, min_139, p2_69, img_34, p1_71, p3_71, min_143, p2_71, img_35, p1_73, p3_73, min_147, p2_73, img_36, p1_75, p3_75, min_151, p2_75, img_37, p1_77, p3_77, min_155, p2_77, img_38, p1_79, p3_79, min_159, p2_79, img_39, p1_81, p3_81, min_163, p2_81, img_40, p1_83, p3_83, min_167, p2_83, img_41, p1_85, p3_85, min_171, p2_85, img_42, p1_87, p3_87, min_175, p2_87, img_43, p1_89, p3_89, min_179, p2_89, img_44, p1_91, p3_91, min_183, p2_91, img_45, p1_93, p3_93, min_187, p2_93, img_46, p1_95, p3_95, min_191, p2_95, img_47, p1_97, p3_97, min_195, p2_97, img_48, p1_99, p3_99, min_199, p2_99, img_49, sub, skel, sub_1, delta, mul, sub_2, relu_2, skel_1, sub_3, delta_1, mul_1, sub_4, relu_4, skel_2, sub_5, delta_2, mul_2, sub_6, relu_6, skel_3, sub_7, delta_3, mul_3, sub_8, relu_8, skel_4, sub_9, delta_4, mul_4, sub_10, relu_10, skel_5, sub_11, delta_5, mul_5, sub_12, relu_12, skel_6, sub_13, delta_6, mul_6, sub_14, relu_14, skel_7, sub_15, delta_7, mul_7, sub_16, relu_16, skel_8, sub_17, delta_8, mul_8, sub_18, relu_18, skel_9, sub_19, delta_9, mul_9, sub_20, relu_20, skel_10, sub_21, delta_10, mul_10, sub_22, relu_22, skel_11, sub_23, delta_11, mul_11, sub_24, relu_24, skel_12, sub_25, delta_12, mul_12, sub_26, relu_26, skel_13, sub_27, delta_13, mul_13, sub_28, relu_28, skel_14, sub_29, delta_14, mul_14, sub_30, relu_30, skel_15, sub_31, delta_15, mul_15, sub_32, relu_32, skel_16, sub_33, delta_16, mul_16, sub_34, relu_34, skel_17, sub_35, delta_17, mul_17, sub_36, relu_36, skel_18, sub_37, delta_18, mul_18, sub_38, relu_38, skel_19, sub_39, delta_19, mul_19, sub_40, relu_40, skel_20, sub_41, delta_20, mul_20, sub_42, relu_42, skel_21, sub_43, delta_21, mul_21, sub_44, relu_44, skel_22, sub_45, delta_22, mul_22, sub_46, relu_46, skel_23, sub_47, delta_23, mul_23, sub_48, relu_48, skel_24, sub_49, delta_24, mul_24, sub_50, relu_50, skel_25, sub_51, delta_25, mul_25, sub_52, relu_52, skel_26, sub_53, delta_26, mul_26, sub_54, relu_54, skel_27, sub_55, delta_27, mul_27, sub_56, relu_56, skel_28, sub_57, delta_28, mul_28, sub_58, relu_58, skel_29, sub_59, delta_29, mul_29, sub_60, relu_60, skel_30, sub_61, delta_30, mul_30, sub_62, relu_62, skel_31, sub_63, delta_31, mul_31, sub_64, relu_64, skel_32, sub_65, delta_32, mul_32, sub_66, 
    # relu_66, skel_33, sub_67, delta_33, mul_33, sub_68, relu_68, skel_34, sub_69, delta_34, mul_34, sub_70, relu_70, skel_35, sub_71, delta_35, mul_35, sub_72, relu_72, skel_36, sub_73, delta_36, mul_36, sub_74, relu_74, skel_37, sub_75, delta_37, mul_37, sub_76, relu_76, skel_38, sub_77, delta_38, mul_38, sub_78, relu_78, skel_39, sub_79, delta_39, mul_39, sub_80, relu_80, skel_40, sub_81, delta_40, mul_40, sub_82, relu_82, skel_41, sub_83, delta_41, mul_41, sub_84, relu_84, skel_42, sub_85, delta_42, mul_42, sub_86, relu_86, skel_43, sub_87, delta_43, mul_43, sub_88, relu_88, skel_44, sub_89, delta_44, mul_44, sub_90, relu_90, skel_45, sub_91, delta_45, mul_45, sub_92, relu_92, skel_46, sub_93, delta_46, mul_46, sub_94, relu_94, skel_47, sub_95, delta_47, mul_47, sub_96, relu_96, skel_48, sub_97, delta_48, mul_48, sub_98, relu_98, skel_49, sub_99, delta_49, mul_49, sub_100, relu_100, skel_50], Original ATen: [aten.neg, aten.minimum, aten.sub, aten.relu, aten.mul, aten.add]
triton_poi_fused_add_minimum_mul_neg_relu_sub_6.run(buf2884, buf2885, buf2886, buf2887, buf2888, buf2889, buf2890, buf2916, buf2917, buf2918, buf2919, buf2920, buf2921, buf2922, buf2923, buf2924, buf2925, buf2926, buf2927, buf2928, buf2929, buf2930, buf2931, arg0_1, buf18, buf26, buf22, buf42, buf46, buf54, buf50, buf70, buf74, buf82, buf78, buf98, buf110, buf106, buf126, buf138, buf134, buf154, buf166, buf162, buf182, buf194, buf190, buf210, buf222, buf218, buf238, buf250, buf246, buf266, buf278, buf274, buf294, buf298, buf306, buf302, buf322, buf326, buf334, buf330, buf350, buf354, buf362, buf358, buf378, buf382, buf390, buf386, buf406, buf410, buf418, buf414, buf434, buf438, buf446, buf442, buf462, buf466, buf474, buf470, buf490, buf494, buf502, buf498, buf518, buf522, buf530, buf526, buf546, buf550, buf558, buf554, buf574, buf578, buf586, buf582, buf602, buf606, buf614, buf610, buf630, buf634, buf642, buf638, buf658, buf662, buf670, buf666, buf686, buf690, buf698, buf694, buf714, buf718, buf726, buf722, buf742, buf746, buf754, buf750, buf770, buf774, buf782, buf778, buf798, buf802, buf810, buf806, buf826, buf830, buf838, buf834, buf854, buf858, buf866, buf862, buf882, buf886, buf894, buf890, buf910, buf914, buf922, buf918, buf938, buf942, buf950, buf946, buf966, buf970, buf978, buf974, buf994, buf998, buf1006, buf1022, buf1034, buf1030, buf1050, buf1062, buf1058, buf1078, buf1090, buf1086, buf1106, buf1118, buf1114, buf1134, buf1146, buf1142, buf1162, buf1174, buf1170, buf1190, buf1202, buf1198, buf1218, buf1230, buf1226, buf1246, buf1258, buf1254, buf1274, buf1286, buf1282, buf1302, buf1314, buf1310, buf1330, buf1342, buf1338, buf1358, buf1370, buf1366, buf1386, buf1398, buf1394, buf1414, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf1006
del buf1022
del buf1030
del buf1034
del buf1050
del buf1058
del buf106
del buf1062
del buf1078
del buf1086
del buf1090
del buf110
del buf1106
del buf1114
del buf1118
del buf1134
del buf1142
del buf1146
del buf1162
del buf1170
del buf1174
del buf1190
del buf1198
del buf1202
del buf1218
del buf1226
del buf1230
del buf1246
del buf1254
del buf1258
del buf126
del buf1274
del buf1282
del buf1286
del buf1302
del buf1310
del buf1314
del buf1330
del buf1338
del buf134
del buf1342
del buf1358
del buf1366
del buf1370
del buf138
del buf1386
del buf1394
del buf1398
del buf1414
del buf154
del buf162
del buf166
del buf18
del buf182
del buf190
del buf194
del buf210
del buf218
del buf22
del buf222
del buf238
del buf246
del buf250
del buf26
del buf266
del buf274
del buf278
del buf2884
del buf2885
del buf2886
del buf2887
del buf2888
del buf2889
del buf2890
del buf2916
del buf2917
del buf2918
del buf2919
del buf2920
del buf2921
del buf2922
del buf2923
del buf2924
del buf2925
del buf2926
del buf2927
del buf2928
del buf2929
del buf2930
del buf294
del buf298
del buf302
del buf306
del buf322
del buf326
del buf330
del buf334
del buf350
del buf354
del buf358
del buf362
del buf378
del buf382
del buf386
del buf390
del buf406
del buf410
del buf414
del buf418
del buf42
del buf434
del buf438
del buf442
del buf446
del buf46
del buf462
del buf466
del buf470
del buf474
del buf490
del buf494
del buf498
del buf50
del buf502
del buf518
del buf522
del buf526
del buf530
del buf54
del buf546
del buf550
del buf554
del buf558
del buf574
del buf578
del buf582
del buf586
del buf602
del buf606
del buf610
del buf614
del buf630
del buf634
del buf638
del buf642
del buf658
del buf662
del buf666
del buf670
del buf686
del buf690
del buf694
del buf698
del buf70
del buf714
del buf718
del buf722
del buf726
del buf74
del buf742
del buf746
del buf750
del buf754
del buf770
del buf774
del buf778
del buf78
del buf782
del buf798
del buf802
del buf806
del buf810
del buf82
del buf826
del buf830
del buf834
del buf838
del buf854
del buf858
del buf862
del buf866
del buf882
del buf886
del buf890
del buf894
del buf910
del buf914
del buf918
del buf922
del buf938
del buf942
del buf946
del buf950
del buf966
del buf970
del buf974
del buf978
del buf98
del buf994
del buf998
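        # The two returned buffers appear to hold the soft skeletons of arg1_1
        # and arg0_1 respectively, produced by the two fused skeleton-update
        # kernels above.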
return (buf2881, buf2931, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
# ---- python_code (original PyTorch module) ----
import torch
import numpy as np
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
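# A quick sanity example for sum_tensor (hedged, not taken from the original
# repository):
#   sum_tensor(torch.ones(2, 3, 4), axes=(1, 2)) -> tensor([12., 12.])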
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes: can be (, ) = no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
            if net_output.device.type == 'cuda':
                # move the one-hot buffer to the prediction's GPU before scattering
                y_onehot = y_onehot.to(net_output.device)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
        tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
        fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
        fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
        tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
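# A minimal usage sketch for get_tp_fp_fn_tn (shapes are illustrative
# assumptions matching the docstring, not taken from the original repository):
#
#   pred = torch.rand(2, 3, 8, 8)               # (b, c, x, y) soft prediction
#   gt = torch.randint(0, 3, (2, 1, 8, 8))      # (b, 1, x, y) label map
#   tp, fp, fn, tn = get_tp_fp_fn_tn(pred, gt)  # each of shape (b, c)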
def soft_erode(I):
    # soft (grey-scale) erosion: min over a 3-voxel cross neighbourhood,
    # realised as negated axis-wise max-pools
    p1 = -F.max_pool3d(-I, (3, 1, 1), (1, 1, 1), (1, 0, 0))
    p2 = -F.max_pool3d(-I, (1, 3, 1), (1, 1, 1), (0, 1, 0))
    p3 = -F.max_pool3d(-I, (1, 1, 3), (1, 1, 1), (0, 0, 1))
    return torch.min(torch.min(p1, p3), p2)
def soft_dilate(I):
    # soft dilation: max over the full 3x3x3 neighbourhood
    return F.max_pool3d(I, (3, 3, 3), (1, 1, 1), (1, 1, 1))
def soft_open(I):
    # morphological opening: erosion followed by dilation
    return soft_dilate(soft_erode(I))
def soft_skel(img, k=50):
    # iterative soft skeletonisation: accumulate the residue between the image
    # and its soft opening while the image is progressively eroded
    img1 = soft_open(img)
    skel = F.relu(img - img1)
    for _ in range(k):
        img = soft_erode(img)
        img1 = soft_open(img)
        delta = F.relu(img - img1)
        skel = skel + F.relu(delta - skel * delta)
        if torch.cuda.is_available():
            # drop intermediates eagerly so their GPU memory can be reused
            del img1
            del delta
    return skel
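# Sketch (assumed shapes): for img with values in [0, 1], e.g.
# soft_skel(torch.rand(1, 1, 8, 8, 8), k=5), the result also stays in [0, 1],
# since each update adds relu(delta * (1 - skel)) <= 1 - skel.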
class SoftClDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, k=2):
"""
"""
super(SoftClDiceLoss, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.k = k
    def softCenterline(self, I):
        maxpool = nn.MaxPool3d(3, stride=1, padding=1)
        relu = nn.ReLU()
        # soft opening: erosion (-maxpool(-I)) followed by dilation (maxpool)
        Ip = maxpool(-maxpool(-I))
        cl = relu(I - Ip)
        for _ in range(self.k):
            I = -maxpool(-I)
            Ip = maxpool(-maxpool(-I))
            cl = cl + cl * relu(I - Ip)
        return cl
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
clp = soft_skel(x)
cll = soft_skel(y)
tp, _fp, fn, _tn = get_tp_fp_fn_tn(x, cll, axes, loss_mask, False)
tpc, fpc, _fnc, _tnc = get_tp_fp_fn_tn(clp, y, axes, loss_mask, False)
clp2vollnom = tpc + self.smooth
clp2vollden = tpc + fpc + self.smooth
clp2voll = clp2vollnom / clp2vollden
cll2volpnom = tp + self.smooth
cll2volpden = tp + fn + self.smooth
cll2volp = cll2volpnom / cll2volpden
dc = 2 * clp2voll * cll2volp / (cll2volp + clp2voll + 1e-08)
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return 1 - dc
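# A minimal usage sketch (the apply_nonlin choice is an assumption; shapes
# follow get_inputs() below):
#
#   loss_fn = SoftClDiceLoss(apply_nonlin=torch.sigmoid, smooth=1.0)
#   x = torch.rand(4, 4, 4, 4)   # network output
#   y = torch.rand(4, 4, 4, 4)   # soft ground truth
#   loss = loss_fn(x, y)         # scalar tensor: 1 - soft clDice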
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
# ---- triton_code (standalone Triton kernels) ----
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
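# Kernel 0: negates the input once and fans the result out to six buffers --
# presumably one copy per axis-wise max-pool in the first round of soft_erode
# for each of the two soft_skel passes.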
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp1, xmask)
tl.store(out_ptr2 + x0, tmp1, xmask)
tl.store(out_ptr3 + x0, tmp1, xmask)
tl.store(out_ptr4 + x0, tmp1, xmask)
tl.store(out_ptr5 + x0, tmp1, xmask)
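# Kernel 1: the min-of-three tail of soft_erode, computed in place over the
# three (already max-pooled) operands after negating each.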
@triton.jit
def triton_poi_fused_minimum_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
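# Kernel 2: the same min-of-three erosion, but the negated result is broadcast
# to six buffers so the next round of max-pools can consume it directly.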
@triton.jit
def triton_poi_fused_minimum_neg_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
tl.store(out_ptr2 + x0, tmp8, xmask)
tl.store(out_ptr3 + x0, tmp8, xmask)
tl.store(out_ptr4 + x0, tmp8, xmask)
tl.store(out_ptr5 + x0, tmp8, xmask)
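# Kernel 3: identical to kernel 1 up to operand order (here the in/out buffer
# supplies the first operand of the min-of-three).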
@triton.jit
def triton_poi_fused_minimum_neg_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
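# Kernel 4: like kernel 2, but the negated erosion result only needs to feed
# three downstream pools, so it is stored to three buffers.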
@triton.jit
def triton_poi_fused_minimum_neg_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp1 = -tmp0
tmp3 = -tmp2
tmp4 = triton_helpers.minimum(tmp1, tmp3)
tmp6 = -tmp5
tmp7 = triton_helpers.minimum(tmp4, tmp6)
tmp8 = -tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
tl.store(out_ptr2 + x0, tmp8, xmask)
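# Kernel 5: a single fused kernel covering dozens of soft_skel iterations.
# Each group of one in/out pointer plus three input pointers supplies one
# iteration's erosion (min of three negated pools) and opening residue; the
# recurrence skel = skel + relu(delta - skel * delta) is threaded through the
# tmp chain, and only the final accumulated skeleton is written back
# (to in_out_ptr49).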
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_5(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5,
in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10,
in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15,
in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20,
in_out_ptr21, in_out_ptr22, in_out_ptr23, in_out_ptr24, in_out_ptr25,
in_out_ptr26, in_out_ptr27, in_out_ptr28, in_out_ptr29, in_out_ptr30,
in_out_ptr31, in_out_ptr32, in_out_ptr33, in_out_ptr34, in_out_ptr35,
in_out_ptr36, in_out_ptr37, in_out_ptr38, in_out_ptr39, in_out_ptr40,
in_out_ptr41, in_out_ptr42, in_out_ptr43, in_out_ptr44, in_out_ptr45,
in_out_ptr46, in_out_ptr47, in_out_ptr48, in_out_ptr49, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15,
in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22,
in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29,
in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36,
in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43,
in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50,
in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57,
in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, in_ptr64,
in_ptr65, in_ptr66, in_ptr67, in_ptr68, in_ptr69, in_ptr70, in_ptr71,
in_ptr72, in_ptr73, in_ptr74, in_ptr75, in_ptr76, in_ptr77, in_ptr78,
in_ptr79, in_ptr80, in_ptr81, in_ptr82, in_ptr83, in_ptr84, in_ptr85,
in_ptr86, in_ptr87, in_ptr88, in_ptr89, in_ptr90, in_ptr91, in_ptr92,
in_ptr93, in_ptr94, in_ptr95, in_ptr96, in_ptr97, in_ptr98, in_ptr99,
in_ptr100, in_ptr101, in_ptr102, in_ptr103, in_ptr104, in_ptr105,
in_ptr106, in_ptr107, in_ptr108, in_ptr109, in_ptr110, in_ptr111,
in_ptr112, in_ptr113, in_ptr114, in_ptr115, in_ptr116, in_ptr117,
in_ptr118, in_ptr119, in_ptr120, in_ptr121, in_ptr122, in_ptr123,
in_ptr124, in_ptr125, in_ptr126, in_ptr127, in_ptr128, in_ptr129,
in_ptr130, in_ptr131, in_ptr132, in_ptr133, in_ptr134, in_ptr135,
in_ptr136, in_ptr137, in_ptr138, in_ptr139, in_ptr140, in_ptr141,
in_ptr142, in_ptr143, in_ptr144, in_ptr145, in_ptr146, in_ptr147,
in_ptr148, in_ptr149, in_ptr150, in_ptr151, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp10 = tl.load(in_ptr3 + x0, xmask)
tmp13 = tl.load(in_ptr4 + x0, xmask)
tmp20 = tl.load(in_out_ptr1 + x0, xmask)
tmp22 = tl.load(in_ptr5 + x0, xmask)
tmp25 = tl.load(in_ptr6 + x0, xmask)
tmp28 = tl.load(in_ptr7 + x0, xmask)
tmp35 = tl.load(in_out_ptr2 + x0, xmask)
tmp37 = tl.load(in_ptr8 + x0, xmask)
tmp40 = tl.load(in_ptr9 + x0, xmask)
tmp43 = tl.load(in_ptr10 + x0, xmask)
tmp50 = tl.load(in_out_ptr3 + x0, xmask)
tmp52 = tl.load(in_ptr11 + x0, xmask)
tmp55 = tl.load(in_ptr12 + x0, xmask)
tmp58 = tl.load(in_ptr13 + x0, xmask)
tmp65 = tl.load(in_out_ptr4 + x0, xmask)
tmp67 = tl.load(in_ptr14 + x0, xmask)
tmp70 = tl.load(in_ptr15 + x0, xmask)
tmp73 = tl.load(in_ptr16 + x0, xmask)
tmp80 = tl.load(in_out_ptr5 + x0, xmask)
tmp82 = tl.load(in_ptr17 + x0, xmask)
tmp85 = tl.load(in_ptr18 + x0, xmask)
tmp88 = tl.load(in_ptr19 + x0, xmask)
tmp95 = tl.load(in_out_ptr6 + x0, xmask)
tmp97 = tl.load(in_ptr20 + x0, xmask)
tmp100 = tl.load(in_ptr21 + x0, xmask)
tmp103 = tl.load(in_ptr22 + x0, xmask)
tmp110 = tl.load(in_out_ptr7 + x0, xmask)
tmp112 = tl.load(in_ptr23 + x0, xmask)
tmp115 = tl.load(in_ptr24 + x0, xmask)
tmp118 = tl.load(in_ptr25 + x0, xmask)
tmp125 = tl.load(in_out_ptr8 + x0, xmask)
tmp127 = tl.load(in_ptr26 + x0, xmask)
tmp130 = tl.load(in_ptr27 + x0, xmask)
tmp133 = tl.load(in_ptr28 + x0, xmask)
tmp140 = tl.load(in_out_ptr9 + x0, xmask)
tmp142 = tl.load(in_ptr29 + x0, xmask)
tmp145 = tl.load(in_ptr30 + x0, xmask)
tmp148 = tl.load(in_ptr31 + x0, xmask)
tmp155 = tl.load(in_out_ptr10 + x0, xmask)
tmp157 = tl.load(in_ptr32 + x0, xmask)
tmp160 = tl.load(in_ptr33 + x0, xmask)
tmp163 = tl.load(in_ptr34 + x0, xmask)
tmp170 = tl.load(in_out_ptr11 + x0, xmask)
tmp172 = tl.load(in_ptr35 + x0, xmask)
tmp175 = tl.load(in_ptr36 + x0, xmask)
tmp178 = tl.load(in_ptr37 + x0, xmask)
tmp185 = tl.load(in_out_ptr12 + x0, xmask)
tmp187 = tl.load(in_ptr38 + x0, xmask)
tmp190 = tl.load(in_ptr39 + x0, xmask)
tmp193 = tl.load(in_ptr40 + x0, xmask)
tmp200 = tl.load(in_out_ptr13 + x0, xmask)
tmp202 = tl.load(in_ptr41 + x0, xmask)
tmp205 = tl.load(in_ptr42 + x0, xmask)
tmp208 = tl.load(in_ptr43 + x0, xmask)
tmp215 = tl.load(in_out_ptr14 + x0, xmask)
tmp217 = tl.load(in_ptr44 + x0, xmask)
tmp220 = tl.load(in_ptr45 + x0, xmask)
tmp223 = tl.load(in_ptr46 + x0, xmask)
tmp230 = tl.load(in_out_ptr15 + x0, xmask)
tmp232 = tl.load(in_ptr47 + x0, xmask)
tmp235 = tl.load(in_ptr48 + x0, xmask)
tmp238 = tl.load(in_ptr49 + x0, xmask)
tmp245 = tl.load(in_out_ptr16 + x0, xmask)
tmp247 = tl.load(in_ptr50 + x0, xmask)
tmp250 = tl.load(in_ptr51 + x0, xmask)
tmp253 = tl.load(in_ptr52 + x0, xmask)
tmp260 = tl.load(in_out_ptr17 + x0, xmask)
tmp262 = tl.load(in_ptr53 + x0, xmask)
tmp265 = tl.load(in_ptr54 + x0, xmask)
tmp268 = tl.load(in_ptr55 + x0, xmask)
tmp275 = tl.load(in_out_ptr18 + x0, xmask)
tmp277 = tl.load(in_ptr56 + x0, xmask)
tmp280 = tl.load(in_ptr57 + x0, xmask)
tmp283 = tl.load(in_ptr58 + x0, xmask)
tmp290 = tl.load(in_out_ptr19 + x0, xmask)
tmp292 = tl.load(in_ptr59 + x0, xmask)
tmp295 = tl.load(in_ptr60 + x0, xmask)
tmp298 = tl.load(in_ptr61 + x0, xmask)
tmp305 = tl.load(in_out_ptr20 + x0, xmask)
tmp307 = tl.load(in_ptr62 + x0, xmask)
tmp310 = tl.load(in_ptr63 + x0, xmask)
tmp313 = tl.load(in_ptr64 + x0, xmask)
tmp320 = tl.load(in_out_ptr21 + x0, xmask)
tmp322 = tl.load(in_ptr65 + x0, xmask)
tmp325 = tl.load(in_ptr66 + x0, xmask)
tmp328 = tl.load(in_ptr67 + x0, xmask)
tmp335 = tl.load(in_out_ptr22 + x0, xmask)
tmp337 = tl.load(in_ptr68 + x0, xmask)
tmp340 = tl.load(in_ptr69 + x0, xmask)
tmp343 = tl.load(in_ptr70 + x0, xmask)
tmp350 = tl.load(in_out_ptr23 + x0, xmask)
tmp352 = tl.load(in_ptr71 + x0, xmask)
tmp355 = tl.load(in_ptr72 + x0, xmask)
tmp358 = tl.load(in_ptr73 + x0, xmask)
tmp365 = tl.load(in_out_ptr24 + x0, xmask)
tmp367 = tl.load(in_ptr74 + x0, xmask)
tmp370 = tl.load(in_ptr75 + x0, xmask)
tmp373 = tl.load(in_ptr76 + x0, xmask)
tmp380 = tl.load(in_out_ptr25 + x0, xmask)
tmp382 = tl.load(in_ptr77 + x0, xmask)
tmp385 = tl.load(in_ptr78 + x0, xmask)
tmp388 = tl.load(in_ptr79 + x0, xmask)
tmp395 = tl.load(in_out_ptr26 + x0, xmask)
tmp397 = tl.load(in_ptr80 + x0, xmask)
tmp400 = tl.load(in_ptr81 + x0, xmask)
tmp403 = tl.load(in_ptr82 + x0, xmask)
tmp410 = tl.load(in_out_ptr27 + x0, xmask)
tmp412 = tl.load(in_ptr83 + x0, xmask)
tmp415 = tl.load(in_ptr84 + x0, xmask)
tmp418 = tl.load(in_ptr85 + x0, xmask)
tmp425 = tl.load(in_out_ptr28 + x0, xmask)
tmp427 = tl.load(in_ptr86 + x0, xmask)
tmp430 = tl.load(in_ptr87 + x0, xmask)
tmp433 = tl.load(in_ptr88 + x0, xmask)
tmp440 = tl.load(in_out_ptr29 + x0, xmask)
tmp442 = tl.load(in_ptr89 + x0, xmask)
tmp445 = tl.load(in_ptr90 + x0, xmask)
tmp448 = tl.load(in_ptr91 + x0, xmask)
tmp455 = tl.load(in_out_ptr30 + x0, xmask)
tmp457 = tl.load(in_ptr92 + x0, xmask)
tmp460 = tl.load(in_ptr93 + x0, xmask)
tmp463 = tl.load(in_ptr94 + x0, xmask)
tmp470 = tl.load(in_out_ptr31 + x0, xmask)
tmp472 = tl.load(in_ptr95 + x0, xmask)
tmp475 = tl.load(in_ptr96 + x0, xmask)
tmp478 = tl.load(in_ptr97 + x0, xmask)
tmp485 = tl.load(in_out_ptr32 + x0, xmask)
tmp487 = tl.load(in_ptr98 + x0, xmask)
tmp490 = tl.load(in_ptr99 + x0, xmask)
tmp493 = tl.load(in_ptr100 + x0, xmask)
tmp500 = tl.load(in_out_ptr33 + x0, xmask)
tmp502 = tl.load(in_ptr101 + x0, xmask)
tmp505 = tl.load(in_ptr102 + x0, xmask)
tmp508 = tl.load(in_ptr103 + x0, xmask)
tmp515 = tl.load(in_out_ptr34 + x0, xmask)
tmp517 = tl.load(in_ptr104 + x0, xmask)
tmp520 = tl.load(in_ptr105 + x0, xmask)
tmp523 = tl.load(in_ptr106 + x0, xmask)
tmp530 = tl.load(in_out_ptr35 + x0, xmask)
tmp532 = tl.load(in_ptr107 + x0, xmask)
tmp535 = tl.load(in_ptr108 + x0, xmask)
tmp538 = tl.load(in_ptr109 + x0, xmask)
tmp545 = tl.load(in_out_ptr36 + x0, xmask)
tmp547 = tl.load(in_ptr110 + x0, xmask)
tmp550 = tl.load(in_ptr111 + x0, xmask)
tmp553 = tl.load(in_ptr112 + x0, xmask)
tmp560 = tl.load(in_out_ptr37 + x0, xmask)
tmp562 = tl.load(in_ptr113 + x0, xmask)
tmp565 = tl.load(in_ptr114 + x0, xmask)
tmp568 = tl.load(in_ptr115 + x0, xmask)
tmp575 = tl.load(in_out_ptr38 + x0, xmask)
tmp577 = tl.load(in_ptr116 + x0, xmask)
tmp580 = tl.load(in_ptr117 + x0, xmask)
tmp583 = tl.load(in_ptr118 + x0, xmask)
tmp590 = tl.load(in_out_ptr39 + x0, xmask)
tmp592 = tl.load(in_ptr119 + x0, xmask)
tmp595 = tl.load(in_ptr120 + x0, xmask)
tmp598 = tl.load(in_ptr121 + x0, xmask)
tmp605 = tl.load(in_out_ptr40 + x0, xmask)
tmp607 = tl.load(in_ptr122 + x0, xmask)
tmp610 = tl.load(in_ptr123 + x0, xmask)
tmp613 = tl.load(in_ptr124 + x0, xmask)
tmp620 = tl.load(in_out_ptr41 + x0, xmask)
tmp622 = tl.load(in_ptr125 + x0, xmask)
tmp625 = tl.load(in_ptr126 + x0, xmask)
tmp628 = tl.load(in_ptr127 + x0, xmask)
tmp635 = tl.load(in_out_ptr42 + x0, xmask)
tmp637 = tl.load(in_ptr128 + x0, xmask)
tmp640 = tl.load(in_ptr129 + x0, xmask)
tmp643 = tl.load(in_ptr130 + x0, xmask)
tmp650 = tl.load(in_out_ptr43 + x0, xmask)
tmp652 = tl.load(in_ptr131 + x0, xmask)
tmp655 = tl.load(in_ptr132 + x0, xmask)
tmp658 = tl.load(in_ptr133 + x0, xmask)
tmp665 = tl.load(in_out_ptr44 + x0, xmask)
tmp667 = tl.load(in_ptr134 + x0, xmask)
tmp670 = tl.load(in_ptr135 + x0, xmask)
tmp673 = tl.load(in_ptr136 + x0, xmask)
tmp680 = tl.load(in_out_ptr45 + x0, xmask)
tmp682 = tl.load(in_ptr137 + x0, xmask)
tmp685 = tl.load(in_ptr138 + x0, xmask)
tmp688 = tl.load(in_ptr139 + x0, xmask)
tmp695 = tl.load(in_out_ptr46 + x0, xmask)
tmp697 = tl.load(in_ptr140 + x0, xmask)
tmp700 = tl.load(in_ptr141 + x0, xmask)
tmp703 = tl.load(in_ptr142 + x0, xmask)
tmp710 = tl.load(in_out_ptr47 + x0, xmask)
tmp712 = tl.load(in_ptr143 + x0, xmask)
tmp715 = tl.load(in_ptr144 + x0, xmask)
tmp718 = tl.load(in_ptr145 + x0, xmask)
tmp725 = tl.load(in_out_ptr48 + x0, xmask)
tmp727 = tl.load(in_ptr146 + x0, xmask)
tmp730 = tl.load(in_ptr147 + x0, xmask)
tmp733 = tl.load(in_ptr148 + x0, xmask)
tmp740 = tl.load(in_out_ptr49 + x0, xmask)
tmp742 = tl.load(in_ptr149 + x0, xmask)
tmp745 = tl.load(in_ptr150 + x0, xmask)
tmp748 = tl.load(in_ptr151 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
tl.store(in_out_ptr49 + x0, tmp754, xmask)
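# Kernel 6 appears to be the matching fused recurrence for the second
# skeleton pass: it consumes the remaining pooled operands and, further below,
# writes the final skeleton buffer returned by call().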
@triton.jit
def triton_poi_fused_add_minimum_mul_neg_relu_sub_6(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5,
in_out_ptr6, in_out_ptr7, in_out_ptr8, in_out_ptr9, in_out_ptr10,
in_out_ptr11, in_out_ptr12, in_out_ptr13, in_out_ptr14, in_out_ptr15,
in_out_ptr16, in_out_ptr17, in_out_ptr18, in_out_ptr19, in_out_ptr20,
in_out_ptr21, in_out_ptr22, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32,
in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39,
in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46,
in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53,
in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60,
in_ptr61, in_ptr62, in_ptr63, in_ptr64, in_ptr65, in_ptr66, in_ptr67,
in_ptr68, in_ptr69, in_ptr70, in_ptr71, in_ptr72, in_ptr73, in_ptr74,
in_ptr75, in_ptr76, in_ptr77, in_ptr78, in_ptr79, in_ptr80, in_ptr81,
in_ptr82, in_ptr83, in_ptr84, in_ptr85, in_ptr86, in_ptr87, in_ptr88,
in_ptr89, in_ptr90, in_ptr91, in_ptr92, in_ptr93, in_ptr94, in_ptr95,
in_ptr96, in_ptr97, in_ptr98, in_ptr99, in_ptr100, in_ptr101, in_ptr102,
in_ptr103, in_ptr104, in_ptr105, in_ptr106, in_ptr107, in_ptr108,
in_ptr109, in_ptr110, in_ptr111, in_ptr112, in_ptr113, in_ptr114,
in_ptr115, in_ptr116, in_ptr117, in_ptr118, in_ptr119, in_ptr120,
in_ptr121, in_ptr122, in_ptr123, in_ptr124, in_ptr125, in_ptr126,
in_ptr127, in_ptr128, in_ptr129, in_ptr130, in_ptr131, in_ptr132,
in_ptr133, in_ptr134, in_ptr135, in_ptr136, in_ptr137, in_ptr138,
in_ptr139, in_ptr140, in_ptr141, in_ptr142, in_ptr143, in_ptr144,
in_ptr145, in_ptr146, in_ptr147, in_ptr148, in_ptr149, in_ptr150,
in_ptr151, in_ptr152, in_ptr153, in_ptr154, in_ptr155, in_ptr156,
in_ptr157, in_ptr158, in_ptr159, in_ptr160, in_ptr161, in_ptr162,
in_ptr163, in_ptr164, in_ptr165, in_ptr166, in_ptr167, in_ptr168,
in_ptr169, in_ptr170, in_ptr171, in_ptr172, in_ptr173, in_ptr174,
in_ptr175, in_ptr176, in_ptr177, in_ptr178, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp10 = tl.load(in_ptr3 + x0, xmask)
tmp13 = tl.load(in_ptr4 + x0, xmask)
tmp20 = tl.load(in_ptr5 + x0, xmask)
tmp22 = tl.load(in_ptr6 + x0, xmask)
tmp25 = tl.load(in_ptr7 + x0, xmask)
tmp28 = tl.load(in_ptr8 + x0, xmask)
tmp35 = tl.load(in_ptr9 + x0, xmask)
tmp37 = tl.load(in_ptr10 + x0, xmask)
tmp40 = tl.load(in_ptr11 + x0, xmask)
tmp43 = tl.load(in_ptr12 + x0, xmask)
tmp50 = tl.load(in_out_ptr1 + x0, xmask)
tmp52 = tl.load(in_ptr13 + x0, xmask)
tmp55 = tl.load(in_ptr14 + x0, xmask)
tmp58 = tl.load(in_ptr15 + x0, xmask)
tmp65 = tl.load(in_out_ptr2 + x0, xmask)
tmp67 = tl.load(in_ptr16 + x0, xmask)
tmp70 = tl.load(in_ptr17 + x0, xmask)
tmp73 = tl.load(in_ptr18 + x0, xmask)
tmp80 = tl.load(in_out_ptr3 + x0, xmask)
tmp82 = tl.load(in_ptr19 + x0, xmask)
tmp85 = tl.load(in_ptr20 + x0, xmask)
tmp88 = tl.load(in_ptr21 + x0, xmask)
tmp95 = tl.load(in_out_ptr4 + x0, xmask)
tmp97 = tl.load(in_ptr22 + x0, xmask)
tmp100 = tl.load(in_ptr23 + x0, xmask)
tmp103 = tl.load(in_ptr24 + x0, xmask)
tmp110 = tl.load(in_out_ptr5 + x0, xmask)
tmp112 = tl.load(in_ptr25 + x0, xmask)
tmp115 = tl.load(in_ptr26 + x0, xmask)
tmp118 = tl.load(in_ptr27 + x0, xmask)
tmp125 = tl.load(in_out_ptr6 + x0, xmask)
tmp127 = tl.load(in_ptr28 + x0, xmask)
tmp130 = tl.load(in_ptr29 + x0, xmask)
tmp133 = tl.load(in_ptr30 + x0, xmask)
tmp140 = tl.load(in_out_ptr7 + x0, xmask)
tmp142 = tl.load(in_ptr31 + x0, xmask)
tmp145 = tl.load(in_ptr32 + x0, xmask)
tmp148 = tl.load(in_ptr33 + x0, xmask)
tmp155 = tl.load(in_ptr34 + x0, xmask)
tmp157 = tl.load(in_ptr35 + x0, xmask)
tmp160 = tl.load(in_ptr36 + x0, xmask)
tmp163 = tl.load(in_ptr37 + x0, xmask)
tmp170 = tl.load(in_ptr38 + x0, xmask)
tmp172 = tl.load(in_ptr39 + x0, xmask)
tmp175 = tl.load(in_ptr40 + x0, xmask)
tmp178 = tl.load(in_ptr41 + x0, xmask)
tmp185 = tl.load(in_ptr42 + x0, xmask)
tmp187 = tl.load(in_ptr43 + x0, xmask)
tmp190 = tl.load(in_ptr44 + x0, xmask)
tmp193 = tl.load(in_ptr45 + x0, xmask)
tmp200 = tl.load(in_ptr46 + x0, xmask)
tmp202 = tl.load(in_ptr47 + x0, xmask)
tmp205 = tl.load(in_ptr48 + x0, xmask)
tmp208 = tl.load(in_ptr49 + x0, xmask)
tmp215 = tl.load(in_ptr50 + x0, xmask)
tmp217 = tl.load(in_ptr51 + x0, xmask)
tmp220 = tl.load(in_ptr52 + x0, xmask)
tmp223 = tl.load(in_ptr53 + x0, xmask)
tmp230 = tl.load(in_ptr54 + x0, xmask)
tmp232 = tl.load(in_ptr55 + x0, xmask)
tmp235 = tl.load(in_ptr56 + x0, xmask)
tmp238 = tl.load(in_ptr57 + x0, xmask)
tmp245 = tl.load(in_ptr58 + x0, xmask)
tmp247 = tl.load(in_ptr59 + x0, xmask)
tmp250 = tl.load(in_ptr60 + x0, xmask)
tmp253 = tl.load(in_ptr61 + x0, xmask)
tmp260 = tl.load(in_ptr62 + x0, xmask)
tmp262 = tl.load(in_ptr63 + x0, xmask)
tmp265 = tl.load(in_ptr64 + x0, xmask)
tmp268 = tl.load(in_ptr65 + x0, xmask)
tmp275 = tl.load(in_ptr66 + x0, xmask)
tmp277 = tl.load(in_ptr67 + x0, xmask)
tmp280 = tl.load(in_ptr68 + x0, xmask)
tmp283 = tl.load(in_ptr69 + x0, xmask)
tmp290 = tl.load(in_ptr70 + x0, xmask)
tmp292 = tl.load(in_ptr71 + x0, xmask)
tmp295 = tl.load(in_ptr72 + x0, xmask)
tmp298 = tl.load(in_ptr73 + x0, xmask)
tmp305 = tl.load(in_ptr74 + x0, xmask)
tmp307 = tl.load(in_ptr75 + x0, xmask)
tmp310 = tl.load(in_ptr76 + x0, xmask)
tmp313 = tl.load(in_ptr77 + x0, xmask)
tmp320 = tl.load(in_ptr78 + x0, xmask)
tmp322 = tl.load(in_ptr79 + x0, xmask)
tmp325 = tl.load(in_ptr80 + x0, xmask)
tmp328 = tl.load(in_ptr81 + x0, xmask)
tmp335 = tl.load(in_ptr82 + x0, xmask)
tmp337 = tl.load(in_ptr83 + x0, xmask)
tmp340 = tl.load(in_ptr84 + x0, xmask)
tmp343 = tl.load(in_ptr85 + x0, xmask)
tmp350 = tl.load(in_ptr86 + x0, xmask)
tmp352 = tl.load(in_ptr87 + x0, xmask)
tmp355 = tl.load(in_ptr88 + x0, xmask)
tmp358 = tl.load(in_ptr89 + x0, xmask)
tmp365 = tl.load(in_ptr90 + x0, xmask)
tmp367 = tl.load(in_ptr91 + x0, xmask)
tmp370 = tl.load(in_ptr92 + x0, xmask)
tmp373 = tl.load(in_ptr93 + x0, xmask)
tmp380 = tl.load(in_ptr94 + x0, xmask)
tmp382 = tl.load(in_ptr95 + x0, xmask)
tmp385 = tl.load(in_ptr96 + x0, xmask)
tmp388 = tl.load(in_ptr97 + x0, xmask)
tmp395 = tl.load(in_ptr98 + x0, xmask)
tmp397 = tl.load(in_ptr99 + x0, xmask)
tmp400 = tl.load(in_ptr100 + x0, xmask)
tmp403 = tl.load(in_ptr101 + x0, xmask)
tmp410 = tl.load(in_ptr102 + x0, xmask)
tmp412 = tl.load(in_ptr103 + x0, xmask)
tmp415 = tl.load(in_ptr104 + x0, xmask)
tmp418 = tl.load(in_ptr105 + x0, xmask)
tmp425 = tl.load(in_ptr106 + x0, xmask)
tmp427 = tl.load(in_ptr107 + x0, xmask)
tmp430 = tl.load(in_ptr108 + x0, xmask)
tmp433 = tl.load(in_ptr109 + x0, xmask)
tmp440 = tl.load(in_ptr110 + x0, xmask)
tmp442 = tl.load(in_ptr111 + x0, xmask)
tmp445 = tl.load(in_ptr112 + x0, xmask)
tmp448 = tl.load(in_ptr113 + x0, xmask)
tmp455 = tl.load(in_ptr114 + x0, xmask)
tmp457 = tl.load(in_ptr115 + x0, xmask)
tmp460 = tl.load(in_ptr116 + x0, xmask)
tmp463 = tl.load(in_ptr117 + x0, xmask)
tmp470 = tl.load(in_ptr118 + x0, xmask)
tmp472 = tl.load(in_ptr119 + x0, xmask)
tmp475 = tl.load(in_ptr120 + x0, xmask)
tmp478 = tl.load(in_ptr121 + x0, xmask)
tmp485 = tl.load(in_ptr122 + x0, xmask)
tmp487 = tl.load(in_ptr123 + x0, xmask)
tmp490 = tl.load(in_ptr124 + x0, xmask)
tmp493 = tl.load(in_ptr125 + x0, xmask)
tmp500 = tl.load(in_ptr126 + x0, xmask)
tmp502 = tl.load(in_ptr127 + x0, xmask)
tmp505 = tl.load(in_ptr128 + x0, xmask)
tmp508 = tl.load(in_ptr129 + x0, xmask)
tmp515 = tl.load(in_ptr130 + x0, xmask)
tmp517 = tl.load(in_ptr131 + x0, xmask)
tmp520 = tl.load(in_ptr132 + x0, xmask)
tmp523 = tl.load(in_ptr133 + x0, xmask)
tmp530 = tl.load(in_ptr134 + x0, xmask)
tmp532 = tl.load(in_ptr135 + x0, xmask)
tmp535 = tl.load(in_out_ptr8 + x0, xmask)
tmp538 = tl.load(in_ptr136 + x0, xmask)
tmp545 = tl.load(in_out_ptr9 + x0, xmask)
tmp547 = tl.load(in_ptr137 + x0, xmask)
tmp550 = tl.load(in_ptr138 + x0, xmask)
tmp553 = tl.load(in_ptr139 + x0, xmask)
tmp560 = tl.load(in_out_ptr10 + x0, xmask)
tmp562 = tl.load(in_ptr140 + x0, xmask)
tmp565 = tl.load(in_ptr141 + x0, xmask)
tmp568 = tl.load(in_ptr142 + x0, xmask)
tmp575 = tl.load(in_out_ptr11 + x0, xmask)
tmp577 = tl.load(in_ptr143 + x0, xmask)
tmp580 = tl.load(in_ptr144 + x0, xmask)
tmp583 = tl.load(in_ptr145 + x0, xmask)
tmp590 = tl.load(in_out_ptr12 + x0, xmask)
tmp592 = tl.load(in_ptr146 + x0, xmask)
tmp595 = tl.load(in_ptr147 + x0, xmask)
tmp598 = tl.load(in_ptr148 + x0, xmask)
tmp605 = tl.load(in_out_ptr13 + x0, xmask)
tmp607 = tl.load(in_ptr149 + x0, xmask)
tmp610 = tl.load(in_ptr150 + x0, xmask)
tmp613 = tl.load(in_ptr151 + x0, xmask)
tmp620 = tl.load(in_out_ptr14 + x0, xmask)
tmp622 = tl.load(in_ptr152 + x0, xmask)
tmp625 = tl.load(in_ptr153 + x0, xmask)
tmp628 = tl.load(in_ptr154 + x0, xmask)
tmp635 = tl.load(in_out_ptr15 + x0, xmask)
tmp637 = tl.load(in_ptr155 + x0, xmask)
tmp640 = tl.load(in_ptr156 + x0, xmask)
tmp643 = tl.load(in_ptr157 + x0, xmask)
tmp650 = tl.load(in_out_ptr16 + x0, xmask)
tmp652 = tl.load(in_ptr158 + x0, xmask)
tmp655 = tl.load(in_ptr159 + x0, xmask)
tmp658 = tl.load(in_ptr160 + x0, xmask)
tmp665 = tl.load(in_out_ptr17 + x0, xmask)
tmp667 = tl.load(in_ptr161 + x0, xmask)
tmp670 = tl.load(in_ptr162 + x0, xmask)
tmp673 = tl.load(in_ptr163 + x0, xmask)
tmp680 = tl.load(in_out_ptr18 + x0, xmask)
tmp682 = tl.load(in_ptr164 + x0, xmask)
tmp685 = tl.load(in_ptr165 + x0, xmask)
tmp688 = tl.load(in_ptr166 + x0, xmask)
tmp695 = tl.load(in_out_ptr19 + x0, xmask)
tmp697 = tl.load(in_ptr167 + x0, xmask)
tmp700 = tl.load(in_ptr168 + x0, xmask)
tmp703 = tl.load(in_ptr169 + x0, xmask)
tmp710 = tl.load(in_out_ptr20 + x0, xmask)
tmp712 = tl.load(in_ptr170 + x0, xmask)
tmp715 = tl.load(in_ptr171 + x0, xmask)
tmp718 = tl.load(in_ptr172 + x0, xmask)
tmp725 = tl.load(in_out_ptr21 + x0, xmask)
tmp727 = tl.load(in_ptr173 + x0, xmask)
tmp730 = tl.load(in_ptr174 + x0, xmask)
tmp733 = tl.load(in_ptr175 + x0, xmask)
tmp740 = tl.load(in_out_ptr22 + x0, xmask)
tmp742 = tl.load(in_ptr176 + x0, xmask)
tmp745 = tl.load(in_ptr177 + x0, xmask)
tmp748 = tl.load(in_ptr178 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = -tmp5
tmp8 = -tmp7
tmp9 = triton_helpers.minimum(tmp6, tmp8)
tmp11 = -tmp10
tmp12 = triton_helpers.minimum(tmp9, tmp11)
tmp14 = tmp12 - tmp13
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp4 * tmp15
tmp17 = tmp15 - tmp16
tmp18 = triton_helpers.maximum(tmp3, tmp17)
tmp19 = tmp4 + tmp18
tmp21 = -tmp20
tmp23 = -tmp22
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp26 = -tmp25
tmp27 = triton_helpers.minimum(tmp24, tmp26)
tmp29 = tmp27 - tmp28
tmp30 = triton_helpers.maximum(tmp3, tmp29)
tmp31 = tmp19 * tmp30
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp3, tmp32)
tmp34 = tmp19 + tmp33
tmp36 = -tmp35
tmp38 = -tmp37
tmp39 = triton_helpers.minimum(tmp36, tmp38)
tmp41 = -tmp40
tmp42 = triton_helpers.minimum(tmp39, tmp41)
tmp44 = tmp42 - tmp43
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp34 * tmp45
tmp47 = tmp45 - tmp46
tmp48 = triton_helpers.maximum(tmp3, tmp47)
tmp49 = tmp34 + tmp48
tmp51 = -tmp50
tmp53 = -tmp52
tmp54 = triton_helpers.minimum(tmp51, tmp53)
tmp56 = -tmp55
tmp57 = triton_helpers.minimum(tmp54, tmp56)
tmp59 = tmp57 - tmp58
tmp60 = triton_helpers.maximum(tmp3, tmp59)
tmp61 = tmp49 * tmp60
tmp62 = tmp60 - tmp61
tmp63 = triton_helpers.maximum(tmp3, tmp62)
tmp64 = tmp49 + tmp63
tmp66 = -tmp65
tmp68 = -tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp71 = -tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp74 = tmp72 - tmp73
tmp75 = triton_helpers.maximum(tmp3, tmp74)
tmp76 = tmp64 * tmp75
tmp77 = tmp75 - tmp76
tmp78 = triton_helpers.maximum(tmp3, tmp77)
tmp79 = tmp64 + tmp78
tmp81 = -tmp80
tmp83 = -tmp82
tmp84 = triton_helpers.minimum(tmp81, tmp83)
tmp86 = -tmp85
tmp87 = triton_helpers.minimum(tmp84, tmp86)
tmp89 = tmp87 - tmp88
tmp90 = triton_helpers.maximum(tmp3, tmp89)
tmp91 = tmp79 * tmp90
tmp92 = tmp90 - tmp91
tmp93 = triton_helpers.maximum(tmp3, tmp92)
tmp94 = tmp79 + tmp93
tmp96 = -tmp95
tmp98 = -tmp97
tmp99 = triton_helpers.minimum(tmp96, tmp98)
tmp101 = -tmp100
tmp102 = triton_helpers.minimum(tmp99, tmp101)
tmp104 = tmp102 - tmp103
tmp105 = triton_helpers.maximum(tmp3, tmp104)
tmp106 = tmp94 * tmp105
tmp107 = tmp105 - tmp106
tmp108 = triton_helpers.maximum(tmp3, tmp107)
tmp109 = tmp94 + tmp108
tmp111 = -tmp110
tmp113 = -tmp112
tmp114 = triton_helpers.minimum(tmp111, tmp113)
tmp116 = -tmp115
tmp117 = triton_helpers.minimum(tmp114, tmp116)
tmp119 = tmp117 - tmp118
tmp120 = triton_helpers.maximum(tmp3, tmp119)
tmp121 = tmp109 * tmp120
tmp122 = tmp120 - tmp121
tmp123 = triton_helpers.maximum(tmp3, tmp122)
tmp124 = tmp109 + tmp123
tmp126 = -tmp125
tmp128 = -tmp127
tmp129 = triton_helpers.minimum(tmp126, tmp128)
tmp131 = -tmp130
tmp132 = triton_helpers.minimum(tmp129, tmp131)
tmp134 = tmp132 - tmp133
tmp135 = triton_helpers.maximum(tmp3, tmp134)
tmp136 = tmp124 * tmp135
tmp137 = tmp135 - tmp136
tmp138 = triton_helpers.maximum(tmp3, tmp137)
tmp139 = tmp124 + tmp138
tmp141 = -tmp140
tmp143 = -tmp142
tmp144 = triton_helpers.minimum(tmp141, tmp143)
tmp146 = -tmp145
tmp147 = triton_helpers.minimum(tmp144, tmp146)
tmp149 = tmp147 - tmp148
tmp150 = triton_helpers.maximum(tmp3, tmp149)
tmp151 = tmp139 * tmp150
tmp152 = tmp150 - tmp151
tmp153 = triton_helpers.maximum(tmp3, tmp152)
tmp154 = tmp139 + tmp153
tmp156 = -tmp155
tmp158 = -tmp157
tmp159 = triton_helpers.minimum(tmp156, tmp158)
tmp161 = -tmp160
tmp162 = triton_helpers.minimum(tmp159, tmp161)
tmp164 = tmp162 - tmp163
tmp165 = triton_helpers.maximum(tmp3, tmp164)
tmp166 = tmp154 * tmp165
tmp167 = tmp165 - tmp166
tmp168 = triton_helpers.maximum(tmp3, tmp167)
tmp169 = tmp154 + tmp168
tmp171 = -tmp170
tmp173 = -tmp172
tmp174 = triton_helpers.minimum(tmp171, tmp173)
tmp176 = -tmp175
tmp177 = triton_helpers.minimum(tmp174, tmp176)
tmp179 = tmp177 - tmp178
tmp180 = triton_helpers.maximum(tmp3, tmp179)
tmp181 = tmp169 * tmp180
tmp182 = tmp180 - tmp181
tmp183 = triton_helpers.maximum(tmp3, tmp182)
tmp184 = tmp169 + tmp183
tmp186 = -tmp185
tmp188 = -tmp187
tmp189 = triton_helpers.minimum(tmp186, tmp188)
tmp191 = -tmp190
tmp192 = triton_helpers.minimum(tmp189, tmp191)
tmp194 = tmp192 - tmp193
tmp195 = triton_helpers.maximum(tmp3, tmp194)
tmp196 = tmp184 * tmp195
tmp197 = tmp195 - tmp196
tmp198 = triton_helpers.maximum(tmp3, tmp197)
tmp199 = tmp184 + tmp198
tmp201 = -tmp200
tmp203 = -tmp202
tmp204 = triton_helpers.minimum(tmp201, tmp203)
tmp206 = -tmp205
tmp207 = triton_helpers.minimum(tmp204, tmp206)
tmp209 = tmp207 - tmp208
tmp210 = triton_helpers.maximum(tmp3, tmp209)
tmp211 = tmp199 * tmp210
tmp212 = tmp210 - tmp211
tmp213 = triton_helpers.maximum(tmp3, tmp212)
tmp214 = tmp199 + tmp213
tmp216 = -tmp215
tmp218 = -tmp217
tmp219 = triton_helpers.minimum(tmp216, tmp218)
tmp221 = -tmp220
tmp222 = triton_helpers.minimum(tmp219, tmp221)
tmp224 = tmp222 - tmp223
tmp225 = triton_helpers.maximum(tmp3, tmp224)
tmp226 = tmp214 * tmp225
tmp227 = tmp225 - tmp226
tmp228 = triton_helpers.maximum(tmp3, tmp227)
tmp229 = tmp214 + tmp228
tmp231 = -tmp230
tmp233 = -tmp232
tmp234 = triton_helpers.minimum(tmp231, tmp233)
tmp236 = -tmp235
tmp237 = triton_helpers.minimum(tmp234, tmp236)
tmp239 = tmp237 - tmp238
tmp240 = triton_helpers.maximum(tmp3, tmp239)
tmp241 = tmp229 * tmp240
tmp242 = tmp240 - tmp241
tmp243 = triton_helpers.maximum(tmp3, tmp242)
tmp244 = tmp229 + tmp243
tmp246 = -tmp245
tmp248 = -tmp247
tmp249 = triton_helpers.minimum(tmp246, tmp248)
tmp251 = -tmp250
tmp252 = triton_helpers.minimum(tmp249, tmp251)
tmp254 = tmp252 - tmp253
tmp255 = triton_helpers.maximum(tmp3, tmp254)
tmp256 = tmp244 * tmp255
tmp257 = tmp255 - tmp256
tmp258 = triton_helpers.maximum(tmp3, tmp257)
tmp259 = tmp244 + tmp258
tmp261 = -tmp260
tmp263 = -tmp262
tmp264 = triton_helpers.minimum(tmp261, tmp263)
tmp266 = -tmp265
tmp267 = triton_helpers.minimum(tmp264, tmp266)
tmp269 = tmp267 - tmp268
tmp270 = triton_helpers.maximum(tmp3, tmp269)
tmp271 = tmp259 * tmp270
tmp272 = tmp270 - tmp271
tmp273 = triton_helpers.maximum(tmp3, tmp272)
tmp274 = tmp259 + tmp273
tmp276 = -tmp275
tmp278 = -tmp277
tmp279 = triton_helpers.minimum(tmp276, tmp278)
tmp281 = -tmp280
tmp282 = triton_helpers.minimum(tmp279, tmp281)
tmp284 = tmp282 - tmp283
tmp285 = triton_helpers.maximum(tmp3, tmp284)
tmp286 = tmp274 * tmp285
tmp287 = tmp285 - tmp286
tmp288 = triton_helpers.maximum(tmp3, tmp287)
tmp289 = tmp274 + tmp288
tmp291 = -tmp290
tmp293 = -tmp292
tmp294 = triton_helpers.minimum(tmp291, tmp293)
tmp296 = -tmp295
tmp297 = triton_helpers.minimum(tmp294, tmp296)
tmp299 = tmp297 - tmp298
tmp300 = triton_helpers.maximum(tmp3, tmp299)
tmp301 = tmp289 * tmp300
tmp302 = tmp300 - tmp301
tmp303 = triton_helpers.maximum(tmp3, tmp302)
tmp304 = tmp289 + tmp303
tmp306 = -tmp305
tmp308 = -tmp307
tmp309 = triton_helpers.minimum(tmp306, tmp308)
tmp311 = -tmp310
tmp312 = triton_helpers.minimum(tmp309, tmp311)
tmp314 = tmp312 - tmp313
tmp315 = triton_helpers.maximum(tmp3, tmp314)
tmp316 = tmp304 * tmp315
tmp317 = tmp315 - tmp316
tmp318 = triton_helpers.maximum(tmp3, tmp317)
tmp319 = tmp304 + tmp318
tmp321 = -tmp320
tmp323 = -tmp322
tmp324 = triton_helpers.minimum(tmp321, tmp323)
tmp326 = -tmp325
tmp327 = triton_helpers.minimum(tmp324, tmp326)
tmp329 = tmp327 - tmp328
tmp330 = triton_helpers.maximum(tmp3, tmp329)
tmp331 = tmp319 * tmp330
tmp332 = tmp330 - tmp331
tmp333 = triton_helpers.maximum(tmp3, tmp332)
tmp334 = tmp319 + tmp333
tmp336 = -tmp335
tmp338 = -tmp337
tmp339 = triton_helpers.minimum(tmp336, tmp338)
tmp341 = -tmp340
tmp342 = triton_helpers.minimum(tmp339, tmp341)
tmp344 = tmp342 - tmp343
tmp345 = triton_helpers.maximum(tmp3, tmp344)
tmp346 = tmp334 * tmp345
tmp347 = tmp345 - tmp346
tmp348 = triton_helpers.maximum(tmp3, tmp347)
tmp349 = tmp334 + tmp348
tmp351 = -tmp350
tmp353 = -tmp352
tmp354 = triton_helpers.minimum(tmp351, tmp353)
tmp356 = -tmp355
tmp357 = triton_helpers.minimum(tmp354, tmp356)
tmp359 = tmp357 - tmp358
tmp360 = triton_helpers.maximum(tmp3, tmp359)
tmp361 = tmp349 * tmp360
tmp362 = tmp360 - tmp361
tmp363 = triton_helpers.maximum(tmp3, tmp362)
tmp364 = tmp349 + tmp363
tmp366 = -tmp365
tmp368 = -tmp367
tmp369 = triton_helpers.minimum(tmp366, tmp368)
tmp371 = -tmp370
tmp372 = triton_helpers.minimum(tmp369, tmp371)
tmp374 = tmp372 - tmp373
tmp375 = triton_helpers.maximum(tmp3, tmp374)
tmp376 = tmp364 * tmp375
tmp377 = tmp375 - tmp376
tmp378 = triton_helpers.maximum(tmp3, tmp377)
tmp379 = tmp364 + tmp378
tmp381 = -tmp380
tmp383 = -tmp382
tmp384 = triton_helpers.minimum(tmp381, tmp383)
tmp386 = -tmp385
tmp387 = triton_helpers.minimum(tmp384, tmp386)
tmp389 = tmp387 - tmp388
tmp390 = triton_helpers.maximum(tmp3, tmp389)
tmp391 = tmp379 * tmp390
tmp392 = tmp390 - tmp391
tmp393 = triton_helpers.maximum(tmp3, tmp392)
tmp394 = tmp379 + tmp393
tmp396 = -tmp395
tmp398 = -tmp397
tmp399 = triton_helpers.minimum(tmp396, tmp398)
tmp401 = -tmp400
tmp402 = triton_helpers.minimum(tmp399, tmp401)
tmp404 = tmp402 - tmp403
tmp405 = triton_helpers.maximum(tmp3, tmp404)
tmp406 = tmp394 * tmp405
tmp407 = tmp405 - tmp406
tmp408 = triton_helpers.maximum(tmp3, tmp407)
tmp409 = tmp394 + tmp408
tmp411 = -tmp410
tmp413 = -tmp412
tmp414 = triton_helpers.minimum(tmp411, tmp413)
tmp416 = -tmp415
tmp417 = triton_helpers.minimum(tmp414, tmp416)
tmp419 = tmp417 - tmp418
tmp420 = triton_helpers.maximum(tmp3, tmp419)
tmp421 = tmp409 * tmp420
tmp422 = tmp420 - tmp421
tmp423 = triton_helpers.maximum(tmp3, tmp422)
tmp424 = tmp409 + tmp423
tmp426 = -tmp425
tmp428 = -tmp427
tmp429 = triton_helpers.minimum(tmp426, tmp428)
tmp431 = -tmp430
tmp432 = triton_helpers.minimum(tmp429, tmp431)
tmp434 = tmp432 - tmp433
tmp435 = triton_helpers.maximum(tmp3, tmp434)
tmp436 = tmp424 * tmp435
tmp437 = tmp435 - tmp436
tmp438 = triton_helpers.maximum(tmp3, tmp437)
tmp439 = tmp424 + tmp438
tmp441 = -tmp440
tmp443 = -tmp442
tmp444 = triton_helpers.minimum(tmp441, tmp443)
tmp446 = -tmp445
tmp447 = triton_helpers.minimum(tmp444, tmp446)
tmp449 = tmp447 - tmp448
tmp450 = triton_helpers.maximum(tmp3, tmp449)
tmp451 = tmp439 * tmp450
tmp452 = tmp450 - tmp451
tmp453 = triton_helpers.maximum(tmp3, tmp452)
tmp454 = tmp439 + tmp453
tmp456 = -tmp455
tmp458 = -tmp457
tmp459 = triton_helpers.minimum(tmp456, tmp458)
tmp461 = -tmp460
tmp462 = triton_helpers.minimum(tmp459, tmp461)
tmp464 = tmp462 - tmp463
tmp465 = triton_helpers.maximum(tmp3, tmp464)
tmp466 = tmp454 * tmp465
tmp467 = tmp465 - tmp466
tmp468 = triton_helpers.maximum(tmp3, tmp467)
tmp469 = tmp454 + tmp468
tmp471 = -tmp470
tmp473 = -tmp472
tmp474 = triton_helpers.minimum(tmp471, tmp473)
tmp476 = -tmp475
tmp477 = triton_helpers.minimum(tmp474, tmp476)
tmp479 = tmp477 - tmp478
tmp480 = triton_helpers.maximum(tmp3, tmp479)
tmp481 = tmp469 * tmp480
tmp482 = tmp480 - tmp481
tmp483 = triton_helpers.maximum(tmp3, tmp482)
tmp484 = tmp469 + tmp483
tmp486 = -tmp485
tmp488 = -tmp487
tmp489 = triton_helpers.minimum(tmp486, tmp488)
tmp491 = -tmp490
tmp492 = triton_helpers.minimum(tmp489, tmp491)
tmp494 = tmp492 - tmp493
tmp495 = triton_helpers.maximum(tmp3, tmp494)
tmp496 = tmp484 * tmp495
tmp497 = tmp495 - tmp496
tmp498 = triton_helpers.maximum(tmp3, tmp497)
tmp499 = tmp484 + tmp498
tmp501 = -tmp500
tmp503 = -tmp502
tmp504 = triton_helpers.minimum(tmp501, tmp503)
tmp506 = -tmp505
tmp507 = triton_helpers.minimum(tmp504, tmp506)
tmp509 = tmp507 - tmp508
tmp510 = triton_helpers.maximum(tmp3, tmp509)
tmp511 = tmp499 * tmp510
tmp512 = tmp510 - tmp511
tmp513 = triton_helpers.maximum(tmp3, tmp512)
tmp514 = tmp499 + tmp513
tmp516 = -tmp515
tmp518 = -tmp517
tmp519 = triton_helpers.minimum(tmp516, tmp518)
tmp521 = -tmp520
tmp522 = triton_helpers.minimum(tmp519, tmp521)
tmp524 = tmp522 - tmp523
tmp525 = triton_helpers.maximum(tmp3, tmp524)
tmp526 = tmp514 * tmp525
tmp527 = tmp525 - tmp526
tmp528 = triton_helpers.maximum(tmp3, tmp527)
tmp529 = tmp514 + tmp528
tmp531 = -tmp530
tmp533 = -tmp532
tmp534 = triton_helpers.minimum(tmp531, tmp533)
tmp536 = -tmp535
tmp537 = triton_helpers.minimum(tmp534, tmp536)
tmp539 = tmp537 - tmp538
tmp540 = triton_helpers.maximum(tmp3, tmp539)
tmp541 = tmp529 * tmp540
tmp542 = tmp540 - tmp541
tmp543 = triton_helpers.maximum(tmp3, tmp542)
tmp544 = tmp529 + tmp543
tmp546 = -tmp545
tmp548 = -tmp547
tmp549 = triton_helpers.minimum(tmp546, tmp548)
tmp551 = -tmp550
tmp552 = triton_helpers.minimum(tmp549, tmp551)
tmp554 = tmp552 - tmp553
tmp555 = triton_helpers.maximum(tmp3, tmp554)
tmp556 = tmp544 * tmp555
tmp557 = tmp555 - tmp556
tmp558 = triton_helpers.maximum(tmp3, tmp557)
tmp559 = tmp544 + tmp558
tmp561 = -tmp560
tmp563 = -tmp562
tmp564 = triton_helpers.minimum(tmp561, tmp563)
tmp566 = -tmp565
tmp567 = triton_helpers.minimum(tmp564, tmp566)
tmp569 = tmp567 - tmp568
tmp570 = triton_helpers.maximum(tmp3, tmp569)
tmp571 = tmp559 * tmp570
tmp572 = tmp570 - tmp571
tmp573 = triton_helpers.maximum(tmp3, tmp572)
tmp574 = tmp559 + tmp573
tmp576 = -tmp575
tmp578 = -tmp577
tmp579 = triton_helpers.minimum(tmp576, tmp578)
tmp581 = -tmp580
tmp582 = triton_helpers.minimum(tmp579, tmp581)
tmp584 = tmp582 - tmp583
tmp585 = triton_helpers.maximum(tmp3, tmp584)
tmp586 = tmp574 * tmp585
tmp587 = tmp585 - tmp586
tmp588 = triton_helpers.maximum(tmp3, tmp587)
tmp589 = tmp574 + tmp588
tmp591 = -tmp590
tmp593 = -tmp592
tmp594 = triton_helpers.minimum(tmp591, tmp593)
tmp596 = -tmp595
tmp597 = triton_helpers.minimum(tmp594, tmp596)
tmp599 = tmp597 - tmp598
tmp600 = triton_helpers.maximum(tmp3, tmp599)
tmp601 = tmp589 * tmp600
tmp602 = tmp600 - tmp601
tmp603 = triton_helpers.maximum(tmp3, tmp602)
tmp604 = tmp589 + tmp603
tmp606 = -tmp605
tmp608 = -tmp607
tmp609 = triton_helpers.minimum(tmp606, tmp608)
tmp611 = -tmp610
tmp612 = triton_helpers.minimum(tmp609, tmp611)
tmp614 = tmp612 - tmp613
tmp615 = triton_helpers.maximum(tmp3, tmp614)
tmp616 = tmp604 * tmp615
tmp617 = tmp615 - tmp616
tmp618 = triton_helpers.maximum(tmp3, tmp617)
tmp619 = tmp604 + tmp618
tmp621 = -tmp620
tmp623 = -tmp622
tmp624 = triton_helpers.minimum(tmp621, tmp623)
tmp626 = -tmp625
tmp627 = triton_helpers.minimum(tmp624, tmp626)
tmp629 = tmp627 - tmp628
tmp630 = triton_helpers.maximum(tmp3, tmp629)
tmp631 = tmp619 * tmp630
tmp632 = tmp630 - tmp631
tmp633 = triton_helpers.maximum(tmp3, tmp632)
tmp634 = tmp619 + tmp633
tmp636 = -tmp635
tmp638 = -tmp637
tmp639 = triton_helpers.minimum(tmp636, tmp638)
tmp641 = -tmp640
tmp642 = triton_helpers.minimum(tmp639, tmp641)
tmp644 = tmp642 - tmp643
tmp645 = triton_helpers.maximum(tmp3, tmp644)
tmp646 = tmp634 * tmp645
tmp647 = tmp645 - tmp646
tmp648 = triton_helpers.maximum(tmp3, tmp647)
tmp649 = tmp634 + tmp648
tmp651 = -tmp650
tmp653 = -tmp652
tmp654 = triton_helpers.minimum(tmp651, tmp653)
tmp656 = -tmp655
tmp657 = triton_helpers.minimum(tmp654, tmp656)
tmp659 = tmp657 - tmp658
tmp660 = triton_helpers.maximum(tmp3, tmp659)
tmp661 = tmp649 * tmp660
tmp662 = tmp660 - tmp661
tmp663 = triton_helpers.maximum(tmp3, tmp662)
tmp664 = tmp649 + tmp663
tmp666 = -tmp665
tmp668 = -tmp667
tmp669 = triton_helpers.minimum(tmp666, tmp668)
tmp671 = -tmp670
tmp672 = triton_helpers.minimum(tmp669, tmp671)
tmp674 = tmp672 - tmp673
tmp675 = triton_helpers.maximum(tmp3, tmp674)
tmp676 = tmp664 * tmp675
tmp677 = tmp675 - tmp676
tmp678 = triton_helpers.maximum(tmp3, tmp677)
tmp679 = tmp664 + tmp678
tmp681 = -tmp680
tmp683 = -tmp682
tmp684 = triton_helpers.minimum(tmp681, tmp683)
tmp686 = -tmp685
tmp687 = triton_helpers.minimum(tmp684, tmp686)
tmp689 = tmp687 - tmp688
tmp690 = triton_helpers.maximum(tmp3, tmp689)
tmp691 = tmp679 * tmp690
tmp692 = tmp690 - tmp691
tmp693 = triton_helpers.maximum(tmp3, tmp692)
tmp694 = tmp679 + tmp693
tmp696 = -tmp695
tmp698 = -tmp697
tmp699 = triton_helpers.minimum(tmp696, tmp698)
tmp701 = -tmp700
tmp702 = triton_helpers.minimum(tmp699, tmp701)
tmp704 = tmp702 - tmp703
tmp705 = triton_helpers.maximum(tmp3, tmp704)
tmp706 = tmp694 * tmp705
tmp707 = tmp705 - tmp706
tmp708 = triton_helpers.maximum(tmp3, tmp707)
tmp709 = tmp694 + tmp708
tmp711 = -tmp710
tmp713 = -tmp712
tmp714 = triton_helpers.minimum(tmp711, tmp713)
tmp716 = -tmp715
tmp717 = triton_helpers.minimum(tmp714, tmp716)
tmp719 = tmp717 - tmp718
tmp720 = triton_helpers.maximum(tmp3, tmp719)
tmp721 = tmp709 * tmp720
tmp722 = tmp720 - tmp721
tmp723 = triton_helpers.maximum(tmp3, tmp722)
tmp724 = tmp709 + tmp723
tmp726 = -tmp725
tmp728 = -tmp727
tmp729 = triton_helpers.minimum(tmp726, tmp728)
tmp731 = -tmp730
tmp732 = triton_helpers.minimum(tmp729, tmp731)
tmp734 = tmp732 - tmp733
tmp735 = triton_helpers.maximum(tmp3, tmp734)
tmp736 = tmp724 * tmp735
tmp737 = tmp735 - tmp736
tmp738 = triton_helpers.maximum(tmp3, tmp737)
tmp739 = tmp724 + tmp738
tmp741 = -tmp740
tmp743 = -tmp742
tmp744 = triton_helpers.minimum(tmp741, tmp743)
tmp746 = -tmp745
tmp747 = triton_helpers.minimum(tmp744, tmp746)
tmp749 = tmp747 - tmp748
tmp750 = triton_helpers.maximum(tmp3, tmp749)
tmp751 = tmp739 * tmp750
tmp752 = tmp750 - tmp751
tmp753 = triton_helpers.maximum(tmp3, tmp752)
tmp754 = tmp739 + tmp753
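    # Write the fully accumulated value back in place (in_out_ptr22 is a
    # mutated argument of this kernel).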
    tl.store(in_out_ptr22 + x0, tmp754, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

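# Reference sketch (not part of the generated artifact). The call() graph
# below unrolls the same block many times: a separable 3-D min-pool
# (erosion) built from negated directional max-pools, a dense 3x3x3
# max-pool (dilation), and a fused relu-gated accumulation, which together
# resemble an iterative soft-skeletonization. The helpers here are an
# eager-mode approximation of the two pooling building blocks, inferred
# from the op sequence as an assumption; they are illustrative only and are
# never invoked by call().
import torch.nn.functional as F


def _soft_erode3d_sketch(x):
    # min over a 3x3x3 neighbourhood, computed separably as -maxpool(-x)
    # along each spatial axis, then the elementwise minimum of the three
    p1 = -F.max_pool3d(-x, (3, 1, 1), (1, 1, 1), (1, 0, 0))
    p2 = -F.max_pool3d(-x, (1, 3, 1), (1, 1, 1), (0, 1, 0))
    p3 = -F.max_pool3d(-x, (1, 1, 3), (1, 1, 1), (0, 0, 1))
    return torch.minimum(torch.minimum(p1, p2), p3)


def _soft_dilate3d_sketch(x):
    # dense 3x3x3 max-pool with stride 1 and symmetric padding
    return F.max_pool3d(x, (3, 3, 3), (1, 1, 1), (1, 1, 1))
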
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
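        # triton_poi_fused_neg_0 appears to fill six negated working copies
        # of arg0_1: buf0/buf4/buf8 feed the first separable erosion and
        # buf16/buf20/buf24 feed the second, parallel one.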
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_neg_0[grid(256)](arg0_1, buf0, buf4, buf8, buf16,
buf20, buf24, 256, XBLOCK=128, num_warps=4, num_stages=1)
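        # Directional max-pools of the negated copies; the
        # triton_poi_fused_minimum_neg_1 launch below combines them into a
        # separable 3-D min-pool (erosion).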
buf1 = torch.ops.aten.max_pool3d_with_indices.default(buf0, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf0
buf2 = buf1[0]
del buf1
buf5 = torch.ops.aten.max_pool3d_with_indices.default(buf4, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf4
buf6 = buf5[0]
del buf5
buf9 = torch.ops.aten.max_pool3d_with_indices.default(buf8, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf8
buf10 = buf9[0]
del buf9
buf12 = buf10
del buf10
triton_poi_fused_minimum_neg_1[grid(256)](buf12, buf2, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
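        # Dense 3x3x3 max-pool of the eroded volume (dilation; together with
        # the erosion above this amounts to a morphological opening).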
buf13 = torch.ops.aten.max_pool3d_with_indices.default(buf12, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf14 = buf13[0]
del buf13
buf17 = torch.ops.aten.max_pool3d_with_indices.default(buf16, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf18 = buf17[0]
del buf17
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf22 = buf21[0]
del buf21
buf25 = torch.ops.aten.max_pool3d_with_indices.default(buf24, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf26 = buf25[0]
del buf25
buf28 = buf24
del buf24
buf32 = buf20
del buf20
buf36 = buf16
del buf16
buf44 = buf12
del buf12
buf48 = buf6
del buf6
buf52 = buf2
del buf2
triton_poi_fused_minimum_neg_2[grid(256)](buf18, buf26, buf22,
buf28, buf32, buf36, buf44, buf48, buf52, 256, XBLOCK=128,
num_warps=4, num_stages=1)
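        # From here on the same erode -> open -> accumulate block repeats,
        # fully unrolled; each repetition reuses buffers freed by the
        # previous one (the buf_i = buf_j / del buf_j pairs).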
buf29 = torch.ops.aten.max_pool3d_with_indices.default(buf28, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf28
buf30 = buf29[0]
del buf29
buf33 = torch.ops.aten.max_pool3d_with_indices.default(buf32, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf32
buf34 = buf33[0]
del buf33
buf37 = torch.ops.aten.max_pool3d_with_indices.default(buf36, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf36
buf38 = buf37[0]
del buf37
buf40 = buf30
del buf30
triton_poi_fused_minimum_neg_3[grid(256)](buf40, buf38, buf34, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf41 = torch.ops.aten.max_pool3d_with_indices.default(buf40, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf42 = buf41[0]
del buf41
buf45 = torch.ops.aten.max_pool3d_with_indices.default(buf44, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf46 = buf45[0]
del buf45
buf49 = torch.ops.aten.max_pool3d_with_indices.default(buf48, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf50 = buf49[0]
del buf49
buf53 = torch.ops.aten.max_pool3d_with_indices.default(buf52, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf54 = buf53[0]
del buf53
buf56 = buf52
del buf52
buf60 = buf48
del buf48
buf64 = buf44
del buf44
buf72 = buf40
del buf40
buf76 = buf38
del buf38
buf80 = buf34
del buf34
triton_poi_fused_minimum_neg_2[grid(256)](buf46, buf54, buf50,
buf56, buf60, buf64, buf72, buf76, buf80, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf57 = torch.ops.aten.max_pool3d_with_indices.default(buf56, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf56
buf58 = buf57[0]
del buf57
buf61 = torch.ops.aten.max_pool3d_with_indices.default(buf60, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf60
buf62 = buf61[0]
del buf61
buf65 = torch.ops.aten.max_pool3d_with_indices.default(buf64, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf64
buf66 = buf65[0]
del buf65
buf68 = buf58
del buf58
triton_poi_fused_minimum_neg_3[grid(256)](buf68, buf66, buf62, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf69 = torch.ops.aten.max_pool3d_with_indices.default(buf68, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf70 = buf69[0]
del buf69
buf73 = torch.ops.aten.max_pool3d_with_indices.default(buf72, [3, 1,
1], [1, 1, 1], [1, 0, 0])
buf74 = buf73[0]
del buf73
buf77 = torch.ops.aten.max_pool3d_with_indices.default(buf76, [1, 3,
1], [1, 1, 1], [0, 1, 0])
buf78 = buf77[0]
del buf77
buf81 = torch.ops.aten.max_pool3d_with_indices.default(buf80, [1, 1,
3], [1, 1, 1], [0, 0, 1])
buf82 = buf81[0]
del buf81
buf84 = buf80
del buf80
buf88 = buf76
del buf76
buf92 = buf72
del buf72
buf100 = buf68
del buf68
buf104 = buf66
del buf66
buf108 = buf62
del buf62
triton_poi_fused_minimum_neg_2[grid(256)](buf74, buf82, buf78,
buf84, buf88, buf92, buf100, buf104, buf108, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf85 = torch.ops.aten.max_pool3d_with_indices.default(buf84, [3, 1,
1], [1, 1, 1], [1, 0, 0])
del buf84
buf86 = buf85[0]
del buf85
buf89 = torch.ops.aten.max_pool3d_with_indices.default(buf88, [1, 3,
1], [1, 1, 1], [0, 1, 0])
del buf88
buf90 = buf89[0]
del buf89
buf93 = torch.ops.aten.max_pool3d_with_indices.default(buf92, [1, 1,
3], [1, 1, 1], [0, 0, 1])
del buf92
buf94 = buf93[0]
del buf93
buf96 = buf86
del buf86
triton_poi_fused_minimum_neg_3[grid(256)](buf96, buf94, buf90, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf97 = torch.ops.aten.max_pool3d_with_indices.default(buf96, [3, 3,
3], [1, 1, 1], [1, 1, 1])
buf98 = buf97[0]
del buf97
buf101 = torch.ops.aten.max_pool3d_with_indices.default(buf100, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf102 = buf101[0]
del buf101
buf105 = torch.ops.aten.max_pool3d_with_indices.default(buf104, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf106 = buf105[0]
del buf105
buf109 = torch.ops.aten.max_pool3d_with_indices.default(buf108, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf110 = buf109[0]
del buf109
buf112 = buf108
del buf108
buf116 = buf104
del buf104
buf120 = buf100
del buf100
buf128 = buf96
del buf96
buf132 = buf94
del buf94
buf136 = buf90
del buf90
triton_poi_fused_minimum_neg_2[grid(256)](buf102, buf110, buf106,
buf112, buf116, buf120, buf128, buf132, buf136, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf113 = torch.ops.aten.max_pool3d_with_indices.default(buf112, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf112
buf114 = buf113[0]
del buf113
buf117 = torch.ops.aten.max_pool3d_with_indices.default(buf116, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf116
buf118 = buf117[0]
del buf117
buf121 = torch.ops.aten.max_pool3d_with_indices.default(buf120, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf120
buf122 = buf121[0]
del buf121
buf124 = buf114
del buf114
triton_poi_fused_minimum_neg_3[grid(256)](buf124, buf122, buf118,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf125 = torch.ops.aten.max_pool3d_with_indices.default(buf124, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf126 = buf125[0]
del buf125
buf129 = torch.ops.aten.max_pool3d_with_indices.default(buf128, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf130 = buf129[0]
del buf129
buf133 = torch.ops.aten.max_pool3d_with_indices.default(buf132, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf134 = buf133[0]
del buf133
buf137 = torch.ops.aten.max_pool3d_with_indices.default(buf136, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf138 = buf137[0]
del buf137
buf140 = buf136
del buf136
buf144 = buf132
del buf132
buf148 = buf128
del buf128
buf156 = buf124
del buf124
buf160 = buf122
del buf122
buf164 = buf118
del buf118
triton_poi_fused_minimum_neg_2[grid(256)](buf130, buf138, buf134,
buf140, buf144, buf148, buf156, buf160, buf164, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf141 = torch.ops.aten.max_pool3d_with_indices.default(buf140, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf140
buf142 = buf141[0]
del buf141
buf145 = torch.ops.aten.max_pool3d_with_indices.default(buf144, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf144
buf146 = buf145[0]
del buf145
buf149 = torch.ops.aten.max_pool3d_with_indices.default(buf148, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf148
buf150 = buf149[0]
del buf149
buf152 = buf142
del buf142
triton_poi_fused_minimum_neg_3[grid(256)](buf152, buf150, buf146,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf153 = torch.ops.aten.max_pool3d_with_indices.default(buf152, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf154 = buf153[0]
del buf153
buf157 = torch.ops.aten.max_pool3d_with_indices.default(buf156, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf158 = buf157[0]
del buf157
buf161 = torch.ops.aten.max_pool3d_with_indices.default(buf160, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf162 = buf161[0]
del buf161
buf165 = torch.ops.aten.max_pool3d_with_indices.default(buf164, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf166 = buf165[0]
del buf165
buf168 = buf164
del buf164
buf172 = buf160
del buf160
buf176 = buf156
del buf156
buf184 = buf152
del buf152
buf188 = buf150
del buf150
buf192 = buf146
del buf146
triton_poi_fused_minimum_neg_2[grid(256)](buf158, buf166, buf162,
buf168, buf172, buf176, buf184, buf188, buf192, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf169 = torch.ops.aten.max_pool3d_with_indices.default(buf168, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf168
buf170 = buf169[0]
del buf169
buf173 = torch.ops.aten.max_pool3d_with_indices.default(buf172, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf172
buf174 = buf173[0]
del buf173
buf177 = torch.ops.aten.max_pool3d_with_indices.default(buf176, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf176
buf178 = buf177[0]
del buf177
buf180 = buf170
del buf170
triton_poi_fused_minimum_neg_3[grid(256)](buf180, buf178, buf174,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf181 = torch.ops.aten.max_pool3d_with_indices.default(buf180, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf182 = buf181[0]
del buf181
buf185 = torch.ops.aten.max_pool3d_with_indices.default(buf184, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf186 = buf185[0]
del buf185
buf189 = torch.ops.aten.max_pool3d_with_indices.default(buf188, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf190 = buf189[0]
del buf189
buf193 = torch.ops.aten.max_pool3d_with_indices.default(buf192, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf194 = buf193[0]
del buf193
buf196 = buf192
del buf192
buf200 = buf188
del buf188
buf204 = buf184
del buf184
buf212 = buf180
del buf180
buf216 = buf178
del buf178
buf220 = buf174
del buf174
triton_poi_fused_minimum_neg_2[grid(256)](buf186, buf194, buf190,
buf196, buf200, buf204, buf212, buf216, buf220, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf197 = torch.ops.aten.max_pool3d_with_indices.default(buf196, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf196
buf198 = buf197[0]
del buf197
buf201 = torch.ops.aten.max_pool3d_with_indices.default(buf200, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf200
buf202 = buf201[0]
del buf201
buf205 = torch.ops.aten.max_pool3d_with_indices.default(buf204, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf204
buf206 = buf205[0]
del buf205
buf208 = buf198
del buf198
triton_poi_fused_minimum_neg_3[grid(256)](buf208, buf206, buf202,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf209 = torch.ops.aten.max_pool3d_with_indices.default(buf208, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf210 = buf209[0]
del buf209
buf213 = torch.ops.aten.max_pool3d_with_indices.default(buf212, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf214 = buf213[0]
del buf213
buf217 = torch.ops.aten.max_pool3d_with_indices.default(buf216, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf218 = buf217[0]
del buf217
buf221 = torch.ops.aten.max_pool3d_with_indices.default(buf220, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf222 = buf221[0]
del buf221
buf224 = buf220
del buf220
buf228 = buf216
del buf216
buf232 = buf212
del buf212
buf240 = buf208
del buf208
buf244 = buf206
del buf206
buf248 = buf202
del buf202
triton_poi_fused_minimum_neg_2[grid(256)](buf214, buf222, buf218,
buf224, buf228, buf232, buf240, buf244, buf248, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf225 = torch.ops.aten.max_pool3d_with_indices.default(buf224, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf224
buf226 = buf225[0]
del buf225
buf229 = torch.ops.aten.max_pool3d_with_indices.default(buf228, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf228
buf230 = buf229[0]
del buf229
buf233 = torch.ops.aten.max_pool3d_with_indices.default(buf232, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf232
buf234 = buf233[0]
del buf233
buf236 = buf226
del buf226
triton_poi_fused_minimum_neg_3[grid(256)](buf236, buf234, buf230,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf237 = torch.ops.aten.max_pool3d_with_indices.default(buf236, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf238 = buf237[0]
del buf237
buf241 = torch.ops.aten.max_pool3d_with_indices.default(buf240, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf242 = buf241[0]
del buf241
buf245 = torch.ops.aten.max_pool3d_with_indices.default(buf244, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf246 = buf245[0]
del buf245
buf249 = torch.ops.aten.max_pool3d_with_indices.default(buf248, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf250 = buf249[0]
del buf249
buf252 = buf248
del buf248
buf256 = buf244
del buf244
buf260 = buf240
del buf240
buf268 = buf236
del buf236
buf272 = buf234
del buf234
buf276 = buf230
del buf230
triton_poi_fused_minimum_neg_2[grid(256)](buf242, buf250, buf246,
buf252, buf256, buf260, buf268, buf272, buf276, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf253 = torch.ops.aten.max_pool3d_with_indices.default(buf252, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf252
buf254 = buf253[0]
del buf253
buf257 = torch.ops.aten.max_pool3d_with_indices.default(buf256, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf256
buf258 = buf257[0]
del buf257
buf261 = torch.ops.aten.max_pool3d_with_indices.default(buf260, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf260
buf262 = buf261[0]
del buf261
buf264 = buf254
del buf254
triton_poi_fused_minimum_neg_3[grid(256)](buf264, buf262, buf258,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf265 = torch.ops.aten.max_pool3d_with_indices.default(buf264, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf266 = buf265[0]
del buf265
buf269 = torch.ops.aten.max_pool3d_with_indices.default(buf268, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf270 = buf269[0]
del buf269
buf273 = torch.ops.aten.max_pool3d_with_indices.default(buf272, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf274 = buf273[0]
del buf273
buf277 = torch.ops.aten.max_pool3d_with_indices.default(buf276, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf278 = buf277[0]
del buf277
buf280 = buf276
del buf276
buf284 = buf272
del buf272
buf288 = buf268
del buf268
buf296 = buf264
del buf264
buf300 = buf262
del buf262
buf304 = buf258
del buf258
triton_poi_fused_minimum_neg_2[grid(256)](buf270, buf278, buf274,
buf280, buf284, buf288, buf296, buf300, buf304, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf281 = torch.ops.aten.max_pool3d_with_indices.default(buf280, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf280
buf282 = buf281[0]
del buf281
buf285 = torch.ops.aten.max_pool3d_with_indices.default(buf284, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf284
buf286 = buf285[0]
del buf285
buf289 = torch.ops.aten.max_pool3d_with_indices.default(buf288, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf288
buf290 = buf289[0]
del buf289
buf292 = buf282
del buf282
triton_poi_fused_minimum_neg_3[grid(256)](buf292, buf290, buf286,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf293 = torch.ops.aten.max_pool3d_with_indices.default(buf292, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf294 = buf293[0]
del buf293
buf297 = torch.ops.aten.max_pool3d_with_indices.default(buf296, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf298 = buf297[0]
del buf297
buf301 = torch.ops.aten.max_pool3d_with_indices.default(buf300, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf302 = buf301[0]
del buf301
buf305 = torch.ops.aten.max_pool3d_with_indices.default(buf304, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf306 = buf305[0]
del buf305
buf308 = buf304
del buf304
buf312 = buf300
del buf300
buf316 = buf296
del buf296
buf324 = buf292
del buf292
buf328 = buf290
del buf290
buf332 = buf286
del buf286
triton_poi_fused_minimum_neg_2[grid(256)](buf298, buf306, buf302,
buf308, buf312, buf316, buf324, buf328, buf332, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf309 = torch.ops.aten.max_pool3d_with_indices.default(buf308, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf308
buf310 = buf309[0]
del buf309
buf313 = torch.ops.aten.max_pool3d_with_indices.default(buf312, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf312
buf314 = buf313[0]
del buf313
buf317 = torch.ops.aten.max_pool3d_with_indices.default(buf316, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf316
buf318 = buf317[0]
del buf317
buf320 = buf310
del buf310
triton_poi_fused_minimum_neg_3[grid(256)](buf320, buf318, buf314,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf321 = torch.ops.aten.max_pool3d_with_indices.default(buf320, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf322 = buf321[0]
del buf321
buf325 = torch.ops.aten.max_pool3d_with_indices.default(buf324, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf326 = buf325[0]
del buf325
buf329 = torch.ops.aten.max_pool3d_with_indices.default(buf328, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf330 = buf329[0]
del buf329
buf333 = torch.ops.aten.max_pool3d_with_indices.default(buf332, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf334 = buf333[0]
del buf333
buf336 = buf332
del buf332
buf340 = buf328
del buf328
buf344 = buf324
del buf324
buf352 = buf320
del buf320
buf356 = buf318
del buf318
buf360 = buf314
del buf314
triton_poi_fused_minimum_neg_2[grid(256)](buf326, buf334, buf330,
buf336, buf340, buf344, buf352, buf356, buf360, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf337 = torch.ops.aten.max_pool3d_with_indices.default(buf336, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf336
buf338 = buf337[0]
del buf337
buf341 = torch.ops.aten.max_pool3d_with_indices.default(buf340, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf340
buf342 = buf341[0]
del buf341
buf345 = torch.ops.aten.max_pool3d_with_indices.default(buf344, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf344
buf346 = buf345[0]
del buf345
buf348 = buf338
del buf338
triton_poi_fused_minimum_neg_3[grid(256)](buf348, buf346, buf342,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf349 = torch.ops.aten.max_pool3d_with_indices.default(buf348, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf350 = buf349[0]
del buf349
buf353 = torch.ops.aten.max_pool3d_with_indices.default(buf352, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf354 = buf353[0]
del buf353
buf357 = torch.ops.aten.max_pool3d_with_indices.default(buf356, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf358 = buf357[0]
del buf357
buf361 = torch.ops.aten.max_pool3d_with_indices.default(buf360, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf362 = buf361[0]
del buf361
buf364 = buf360
del buf360
buf368 = buf356
del buf356
buf372 = buf352
del buf352
buf380 = buf348
del buf348
buf384 = buf346
del buf346
buf388 = buf342
del buf342
triton_poi_fused_minimum_neg_2[grid(256)](buf354, buf362, buf358,
buf364, buf368, buf372, buf380, buf384, buf388, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf365 = torch.ops.aten.max_pool3d_with_indices.default(buf364, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf364
buf366 = buf365[0]
del buf365
buf369 = torch.ops.aten.max_pool3d_with_indices.default(buf368, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf368
buf370 = buf369[0]
del buf369
buf373 = torch.ops.aten.max_pool3d_with_indices.default(buf372, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf372
buf374 = buf373[0]
del buf373
buf376 = buf366
del buf366
triton_poi_fused_minimum_neg_3[grid(256)](buf376, buf374, buf370,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf377 = torch.ops.aten.max_pool3d_with_indices.default(buf376, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf378 = buf377[0]
del buf377
buf381 = torch.ops.aten.max_pool3d_with_indices.default(buf380, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf382 = buf381[0]
del buf381
buf385 = torch.ops.aten.max_pool3d_with_indices.default(buf384, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf386 = buf385[0]
del buf385
buf389 = torch.ops.aten.max_pool3d_with_indices.default(buf388, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf390 = buf389[0]
del buf389
buf392 = buf388
del buf388
buf396 = buf384
del buf384
buf400 = buf380
del buf380
buf408 = buf376
del buf376
buf412 = buf374
del buf374
buf416 = buf370
del buf370
triton_poi_fused_minimum_neg_2[grid(256)](buf382, buf390, buf386,
buf392, buf396, buf400, buf408, buf412, buf416, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf393 = torch.ops.aten.max_pool3d_with_indices.default(buf392, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf392
buf394 = buf393[0]
del buf393
buf397 = torch.ops.aten.max_pool3d_with_indices.default(buf396, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf396
buf398 = buf397[0]
del buf397
buf401 = torch.ops.aten.max_pool3d_with_indices.default(buf400, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf400
buf402 = buf401[0]
del buf401
buf404 = buf394
del buf394
triton_poi_fused_minimum_neg_3[grid(256)](buf404, buf402, buf398,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf405 = torch.ops.aten.max_pool3d_with_indices.default(buf404, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf406 = buf405[0]
del buf405
buf409 = torch.ops.aten.max_pool3d_with_indices.default(buf408, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf410 = buf409[0]
del buf409
buf413 = torch.ops.aten.max_pool3d_with_indices.default(buf412, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf414 = buf413[0]
del buf413
buf417 = torch.ops.aten.max_pool3d_with_indices.default(buf416, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf418 = buf417[0]
del buf417
buf420 = buf416
del buf416
buf424 = buf412
del buf412
buf428 = buf408
del buf408
buf436 = buf404
del buf404
buf440 = buf402
del buf402
buf444 = buf398
del buf398
triton_poi_fused_minimum_neg_2[grid(256)](buf410, buf418, buf414,
buf420, buf424, buf428, buf436, buf440, buf444, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf421 = torch.ops.aten.max_pool3d_with_indices.default(buf420, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf420
buf422 = buf421[0]
del buf421
buf425 = torch.ops.aten.max_pool3d_with_indices.default(buf424, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf424
buf426 = buf425[0]
del buf425
buf429 = torch.ops.aten.max_pool3d_with_indices.default(buf428, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf428
buf430 = buf429[0]
del buf429
buf432 = buf422
del buf422
triton_poi_fused_minimum_neg_3[grid(256)](buf432, buf430, buf426,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf433 = torch.ops.aten.max_pool3d_with_indices.default(buf432, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf434 = buf433[0]
del buf433
buf437 = torch.ops.aten.max_pool3d_with_indices.default(buf436, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf438 = buf437[0]
del buf437
buf441 = torch.ops.aten.max_pool3d_with_indices.default(buf440, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf442 = buf441[0]
del buf441
buf445 = torch.ops.aten.max_pool3d_with_indices.default(buf444, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf446 = buf445[0]
del buf445
buf448 = buf444
del buf444
buf452 = buf440
del buf440
buf456 = buf436
del buf436
buf464 = buf432
del buf432
buf468 = buf430
del buf430
buf472 = buf426
del buf426
triton_poi_fused_minimum_neg_2[grid(256)](buf438, buf446, buf442,
buf448, buf452, buf456, buf464, buf468, buf472, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf449 = torch.ops.aten.max_pool3d_with_indices.default(buf448, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf448
buf450 = buf449[0]
del buf449
buf453 = torch.ops.aten.max_pool3d_with_indices.default(buf452, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf452
buf454 = buf453[0]
del buf453
buf457 = torch.ops.aten.max_pool3d_with_indices.default(buf456, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf456
buf458 = buf457[0]
del buf457
buf460 = buf450
del buf450
triton_poi_fused_minimum_neg_3[grid(256)](buf460, buf458, buf454,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf461 = torch.ops.aten.max_pool3d_with_indices.default(buf460, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf462 = buf461[0]
del buf461
buf465 = torch.ops.aten.max_pool3d_with_indices.default(buf464, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf466 = buf465[0]
del buf465
buf469 = torch.ops.aten.max_pool3d_with_indices.default(buf468, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf470 = buf469[0]
del buf469
buf473 = torch.ops.aten.max_pool3d_with_indices.default(buf472, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf474 = buf473[0]
del buf473
buf476 = buf472
del buf472
buf480 = buf468
del buf468
buf484 = buf464
del buf464
buf492 = buf460
del buf460
buf496 = buf458
del buf458
buf500 = buf454
del buf454
triton_poi_fused_minimum_neg_2[grid(256)](buf466, buf474, buf470,
buf476, buf480, buf484, buf492, buf496, buf500, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf477 = torch.ops.aten.max_pool3d_with_indices.default(buf476, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf476
buf478 = buf477[0]
del buf477
buf481 = torch.ops.aten.max_pool3d_with_indices.default(buf480, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf480
buf482 = buf481[0]
del buf481
buf485 = torch.ops.aten.max_pool3d_with_indices.default(buf484, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf484
buf486 = buf485[0]
del buf485
buf488 = buf478
del buf478
triton_poi_fused_minimum_neg_3[grid(256)](buf488, buf486, buf482,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf489 = torch.ops.aten.max_pool3d_with_indices.default(buf488, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf490 = buf489[0]
del buf489
buf493 = torch.ops.aten.max_pool3d_with_indices.default(buf492, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf494 = buf493[0]
del buf493
buf497 = torch.ops.aten.max_pool3d_with_indices.default(buf496, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf498 = buf497[0]
del buf497
buf501 = torch.ops.aten.max_pool3d_with_indices.default(buf500, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf502 = buf501[0]
del buf501
buf504 = buf500
del buf500
buf508 = buf496
del buf496
buf512 = buf492
del buf492
buf520 = buf488
del buf488
buf524 = buf486
del buf486
buf528 = buf482
del buf482
triton_poi_fused_minimum_neg_2[grid(256)](buf494, buf502, buf498,
buf504, buf508, buf512, buf520, buf524, buf528, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf505 = torch.ops.aten.max_pool3d_with_indices.default(buf504, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf504
buf506 = buf505[0]
del buf505
buf509 = torch.ops.aten.max_pool3d_with_indices.default(buf508, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf508
buf510 = buf509[0]
del buf509
buf513 = torch.ops.aten.max_pool3d_with_indices.default(buf512, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf512
buf514 = buf513[0]
del buf513
buf516 = buf506
del buf506
triton_poi_fused_minimum_neg_3[grid(256)](buf516, buf514, buf510,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf517 = torch.ops.aten.max_pool3d_with_indices.default(buf516, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf518 = buf517[0]
del buf517
buf521 = torch.ops.aten.max_pool3d_with_indices.default(buf520, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf522 = buf521[0]
del buf521
buf525 = torch.ops.aten.max_pool3d_with_indices.default(buf524, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf526 = buf525[0]
del buf525
buf529 = torch.ops.aten.max_pool3d_with_indices.default(buf528, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf530 = buf529[0]
del buf529
buf532 = buf528
del buf528
buf536 = buf524
del buf524
buf540 = buf520
del buf520
buf548 = buf516
del buf516
buf552 = buf514
del buf514
buf556 = buf510
del buf510
triton_poi_fused_minimum_neg_2[grid(256)](buf522, buf530, buf526,
buf532, buf536, buf540, buf548, buf552, buf556, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf533 = torch.ops.aten.max_pool3d_with_indices.default(buf532, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf532
buf534 = buf533[0]
del buf533
buf537 = torch.ops.aten.max_pool3d_with_indices.default(buf536, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf536
buf538 = buf537[0]
del buf537
buf541 = torch.ops.aten.max_pool3d_with_indices.default(buf540, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf540
buf542 = buf541[0]
del buf541
buf544 = buf534
del buf534
triton_poi_fused_minimum_neg_3[grid(256)](buf544, buf542, buf538,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf545 = torch.ops.aten.max_pool3d_with_indices.default(buf544, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf546 = buf545[0]
del buf545
buf549 = torch.ops.aten.max_pool3d_with_indices.default(buf548, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf550 = buf549[0]
del buf549
buf553 = torch.ops.aten.max_pool3d_with_indices.default(buf552, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf554 = buf553[0]
del buf553
buf557 = torch.ops.aten.max_pool3d_with_indices.default(buf556, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf558 = buf557[0]
del buf557
buf560 = buf556
del buf556
buf564 = buf552
del buf552
buf568 = buf548
del buf548
buf576 = buf544
del buf544
buf580 = buf542
del buf542
buf584 = buf538
del buf538
triton_poi_fused_minimum_neg_2[grid(256)](buf550, buf558, buf554,
buf560, buf564, buf568, buf576, buf580, buf584, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf561 = torch.ops.aten.max_pool3d_with_indices.default(buf560, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf560
buf562 = buf561[0]
del buf561
buf565 = torch.ops.aten.max_pool3d_with_indices.default(buf564, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf564
buf566 = buf565[0]
del buf565
buf569 = torch.ops.aten.max_pool3d_with_indices.default(buf568, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf568
buf570 = buf569[0]
del buf569
buf572 = buf562
del buf562
triton_poi_fused_minimum_neg_3[grid(256)](buf572, buf570, buf566,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf573 = torch.ops.aten.max_pool3d_with_indices.default(buf572, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf574 = buf573[0]
del buf573
buf577 = torch.ops.aten.max_pool3d_with_indices.default(buf576, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf578 = buf577[0]
del buf577
buf581 = torch.ops.aten.max_pool3d_with_indices.default(buf580, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf582 = buf581[0]
del buf581
buf585 = torch.ops.aten.max_pool3d_with_indices.default(buf584, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf586 = buf585[0]
del buf585
buf588 = buf584
del buf584
buf592 = buf580
del buf580
buf596 = buf576
del buf576
buf604 = buf572
del buf572
buf608 = buf570
del buf570
buf612 = buf566
del buf566
triton_poi_fused_minimum_neg_2[grid(256)](buf578, buf586, buf582,
buf588, buf592, buf596, buf604, buf608, buf612, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf589 = torch.ops.aten.max_pool3d_with_indices.default(buf588, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf588
buf590 = buf589[0]
del buf589
buf593 = torch.ops.aten.max_pool3d_with_indices.default(buf592, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf592
buf594 = buf593[0]
del buf593
buf597 = torch.ops.aten.max_pool3d_with_indices.default(buf596, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf596
buf598 = buf597[0]
del buf597
buf600 = buf590
del buf590
triton_poi_fused_minimum_neg_3[grid(256)](buf600, buf598, buf594,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf601 = torch.ops.aten.max_pool3d_with_indices.default(buf600, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf602 = buf601[0]
del buf601
buf605 = torch.ops.aten.max_pool3d_with_indices.default(buf604, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf606 = buf605[0]
del buf605
buf609 = torch.ops.aten.max_pool3d_with_indices.default(buf608, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf610 = buf609[0]
del buf609
buf613 = torch.ops.aten.max_pool3d_with_indices.default(buf612, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf614 = buf613[0]
del buf613
buf616 = buf612
del buf612
buf620 = buf608
del buf608
buf624 = buf604
del buf604
buf632 = buf600
del buf600
buf636 = buf598
del buf598
buf640 = buf594
del buf594
triton_poi_fused_minimum_neg_2[grid(256)](buf606, buf614, buf610,
buf616, buf620, buf624, buf632, buf636, buf640, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf617 = torch.ops.aten.max_pool3d_with_indices.default(buf616, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf616
buf618 = buf617[0]
del buf617
buf621 = torch.ops.aten.max_pool3d_with_indices.default(buf620, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf620
buf622 = buf621[0]
del buf621
buf625 = torch.ops.aten.max_pool3d_with_indices.default(buf624, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf624
buf626 = buf625[0]
del buf625
buf628 = buf618
del buf618
triton_poi_fused_minimum_neg_3[grid(256)](buf628, buf626, buf622,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf629 = torch.ops.aten.max_pool3d_with_indices.default(buf628, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf630 = buf629[0]
del buf629
buf633 = torch.ops.aten.max_pool3d_with_indices.default(buf632, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf634 = buf633[0]
del buf633
buf637 = torch.ops.aten.max_pool3d_with_indices.default(buf636, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf638 = buf637[0]
del buf637
buf641 = torch.ops.aten.max_pool3d_with_indices.default(buf640, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf642 = buf641[0]
del buf641
buf644 = buf640
del buf640
buf648 = buf636
del buf636
buf652 = buf632
del buf632
buf660 = buf628
del buf628
buf664 = buf626
del buf626
buf668 = buf622
del buf622
triton_poi_fused_minimum_neg_2[grid(256)](buf634, buf642, buf638,
buf644, buf648, buf652, buf660, buf664, buf668, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf645 = torch.ops.aten.max_pool3d_with_indices.default(buf644, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf644
buf646 = buf645[0]
del buf645
buf649 = torch.ops.aten.max_pool3d_with_indices.default(buf648, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf648
buf650 = buf649[0]
del buf649
buf653 = torch.ops.aten.max_pool3d_with_indices.default(buf652, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf652
buf654 = buf653[0]
del buf653
buf656 = buf646
del buf646
triton_poi_fused_minimum_neg_3[grid(256)](buf656, buf654, buf650,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf657 = torch.ops.aten.max_pool3d_with_indices.default(buf656, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf658 = buf657[0]
del buf657
buf661 = torch.ops.aten.max_pool3d_with_indices.default(buf660, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf662 = buf661[0]
del buf661
buf665 = torch.ops.aten.max_pool3d_with_indices.default(buf664, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf666 = buf665[0]
del buf665
buf669 = torch.ops.aten.max_pool3d_with_indices.default(buf668, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf670 = buf669[0]
del buf669
buf672 = buf668
del buf668
buf676 = buf664
del buf664
buf680 = buf660
del buf660
buf688 = buf656
del buf656
buf692 = buf654
del buf654
buf696 = buf650
del buf650
triton_poi_fused_minimum_neg_2[grid(256)](buf662, buf670, buf666,
buf672, buf676, buf680, buf688, buf692, buf696, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf673 = torch.ops.aten.max_pool3d_with_indices.default(buf672, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf672
buf674 = buf673[0]
del buf673
buf677 = torch.ops.aten.max_pool3d_with_indices.default(buf676, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf676
buf678 = buf677[0]
del buf677
buf681 = torch.ops.aten.max_pool3d_with_indices.default(buf680, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf680
buf682 = buf681[0]
del buf681
buf684 = buf674
del buf674
triton_poi_fused_minimum_neg_3[grid(256)](buf684, buf682, buf678,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf685 = torch.ops.aten.max_pool3d_with_indices.default(buf684, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf686 = buf685[0]
del buf685
buf689 = torch.ops.aten.max_pool3d_with_indices.default(buf688, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf690 = buf689[0]
del buf689
buf693 = torch.ops.aten.max_pool3d_with_indices.default(buf692, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf694 = buf693[0]
del buf693
buf697 = torch.ops.aten.max_pool3d_with_indices.default(buf696, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf698 = buf697[0]
del buf697
buf700 = buf696
del buf696
buf704 = buf692
del buf692
buf708 = buf688
del buf688
buf716 = buf684
del buf684
buf720 = buf682
del buf682
buf724 = buf678
del buf678
triton_poi_fused_minimum_neg_2[grid(256)](buf690, buf698, buf694,
buf700, buf704, buf708, buf716, buf720, buf724, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf701 = torch.ops.aten.max_pool3d_with_indices.default(buf700, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf700
buf702 = buf701[0]
del buf701
buf705 = torch.ops.aten.max_pool3d_with_indices.default(buf704, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf704
buf706 = buf705[0]
del buf705
buf709 = torch.ops.aten.max_pool3d_with_indices.default(buf708, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf708
buf710 = buf709[0]
del buf709
buf712 = buf702
del buf702
triton_poi_fused_minimum_neg_3[grid(256)](buf712, buf710, buf706,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf713 = torch.ops.aten.max_pool3d_with_indices.default(buf712, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf714 = buf713[0]
del buf713
buf717 = torch.ops.aten.max_pool3d_with_indices.default(buf716, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf718 = buf717[0]
del buf717
buf721 = torch.ops.aten.max_pool3d_with_indices.default(buf720, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf722 = buf721[0]
del buf721
buf725 = torch.ops.aten.max_pool3d_with_indices.default(buf724, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf726 = buf725[0]
del buf725
buf728 = buf724
del buf724
buf732 = buf720
del buf720
buf736 = buf716
del buf716
buf744 = buf712
del buf712
buf748 = buf710
del buf710
buf752 = buf706
del buf706
triton_poi_fused_minimum_neg_2[grid(256)](buf718, buf726, buf722,
buf728, buf732, buf736, buf744, buf748, buf752, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf729 = torch.ops.aten.max_pool3d_with_indices.default(buf728, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf728
buf730 = buf729[0]
del buf729
buf733 = torch.ops.aten.max_pool3d_with_indices.default(buf732, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf732
buf734 = buf733[0]
del buf733
buf737 = torch.ops.aten.max_pool3d_with_indices.default(buf736, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf736
buf738 = buf737[0]
del buf737
buf740 = buf730
del buf730
triton_poi_fused_minimum_neg_3[grid(256)](buf740, buf738, buf734,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf741 = torch.ops.aten.max_pool3d_with_indices.default(buf740, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf742 = buf741[0]
del buf741
buf745 = torch.ops.aten.max_pool3d_with_indices.default(buf744, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf746 = buf745[0]
del buf745
buf749 = torch.ops.aten.max_pool3d_with_indices.default(buf748, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf750 = buf749[0]
del buf749
buf753 = torch.ops.aten.max_pool3d_with_indices.default(buf752, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf754 = buf753[0]
del buf753
buf756 = buf752
del buf752
buf760 = buf748
del buf748
buf764 = buf744
del buf744
buf772 = buf740
del buf740
buf776 = buf738
del buf738
buf780 = buf734
del buf734
triton_poi_fused_minimum_neg_2[grid(256)](buf746, buf754, buf750,
buf756, buf760, buf764, buf772, buf776, buf780, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf757 = torch.ops.aten.max_pool3d_with_indices.default(buf756, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf756
buf758 = buf757[0]
del buf757
buf761 = torch.ops.aten.max_pool3d_with_indices.default(buf760, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf760
buf762 = buf761[0]
del buf761
buf765 = torch.ops.aten.max_pool3d_with_indices.default(buf764, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf764
buf766 = buf765[0]
del buf765
buf768 = buf758
del buf758
triton_poi_fused_minimum_neg_3[grid(256)](buf768, buf766, buf762,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf769 = torch.ops.aten.max_pool3d_with_indices.default(buf768, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf770 = buf769[0]
del buf769
buf773 = torch.ops.aten.max_pool3d_with_indices.default(buf772, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf774 = buf773[0]
del buf773
buf777 = torch.ops.aten.max_pool3d_with_indices.default(buf776, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf778 = buf777[0]
del buf777
buf781 = torch.ops.aten.max_pool3d_with_indices.default(buf780, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf782 = buf781[0]
del buf781
buf784 = buf780
del buf780
buf788 = buf776
del buf776
buf792 = buf772
del buf772
buf800 = buf768
del buf768
buf804 = buf766
del buf766
buf808 = buf762
del buf762
triton_poi_fused_minimum_neg_2[grid(256)](buf774, buf782, buf778,
buf784, buf788, buf792, buf800, buf804, buf808, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf785 = torch.ops.aten.max_pool3d_with_indices.default(buf784, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf784
buf786 = buf785[0]
del buf785
buf789 = torch.ops.aten.max_pool3d_with_indices.default(buf788, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf788
buf790 = buf789[0]
del buf789
buf793 = torch.ops.aten.max_pool3d_with_indices.default(buf792, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf792
buf794 = buf793[0]
del buf793
buf796 = buf786
del buf786
triton_poi_fused_minimum_neg_3[grid(256)](buf796, buf794, buf790,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf797 = torch.ops.aten.max_pool3d_with_indices.default(buf796, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf798 = buf797[0]
del buf797
buf801 = torch.ops.aten.max_pool3d_with_indices.default(buf800, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf802 = buf801[0]
del buf801
buf805 = torch.ops.aten.max_pool3d_with_indices.default(buf804, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf806 = buf805[0]
del buf805
buf809 = torch.ops.aten.max_pool3d_with_indices.default(buf808, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf810 = buf809[0]
del buf809
buf812 = buf808
del buf808
buf816 = buf804
del buf804
buf820 = buf800
del buf800
buf828 = buf796
del buf796
buf832 = buf794
del buf794
buf836 = buf790
del buf790
triton_poi_fused_minimum_neg_2[grid(256)](buf802, buf810, buf806,
buf812, buf816, buf820, buf828, buf832, buf836, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf813 = torch.ops.aten.max_pool3d_with_indices.default(buf812, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf812
buf814 = buf813[0]
del buf813
buf817 = torch.ops.aten.max_pool3d_with_indices.default(buf816, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf816
buf818 = buf817[0]
del buf817
buf821 = torch.ops.aten.max_pool3d_with_indices.default(buf820, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf820
buf822 = buf821[0]
del buf821
buf824 = buf814
del buf814
triton_poi_fused_minimum_neg_3[grid(256)](buf824, buf822, buf818,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf825 = torch.ops.aten.max_pool3d_with_indices.default(buf824, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf826 = buf825[0]
del buf825
buf829 = torch.ops.aten.max_pool3d_with_indices.default(buf828, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf830 = buf829[0]
del buf829
buf833 = torch.ops.aten.max_pool3d_with_indices.default(buf832, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf834 = buf833[0]
del buf833
buf837 = torch.ops.aten.max_pool3d_with_indices.default(buf836, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf838 = buf837[0]
del buf837
buf840 = buf836
del buf836
buf844 = buf832
del buf832
buf848 = buf828
del buf828
buf856 = buf824
del buf824
buf860 = buf822
del buf822
buf864 = buf818
del buf818
triton_poi_fused_minimum_neg_2[grid(256)](buf830, buf838, buf834,
buf840, buf844, buf848, buf856, buf860, buf864, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf841 = torch.ops.aten.max_pool3d_with_indices.default(buf840, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf840
buf842 = buf841[0]
del buf841
buf845 = torch.ops.aten.max_pool3d_with_indices.default(buf844, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf844
buf846 = buf845[0]
del buf845
buf849 = torch.ops.aten.max_pool3d_with_indices.default(buf848, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf848
buf850 = buf849[0]
del buf849
buf852 = buf842
del buf842
triton_poi_fused_minimum_neg_3[grid(256)](buf852, buf850, buf846,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf853 = torch.ops.aten.max_pool3d_with_indices.default(buf852, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf854 = buf853[0]
del buf853
buf857 = torch.ops.aten.max_pool3d_with_indices.default(buf856, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf858 = buf857[0]
del buf857
buf861 = torch.ops.aten.max_pool3d_with_indices.default(buf860, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf862 = buf861[0]
del buf861
buf865 = torch.ops.aten.max_pool3d_with_indices.default(buf864, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf866 = buf865[0]
del buf865
buf868 = buf864
del buf864
buf872 = buf860
del buf860
buf876 = buf856
del buf856
buf884 = buf852
del buf852
buf888 = buf850
del buf850
buf892 = buf846
del buf846
triton_poi_fused_minimum_neg_2[grid(256)](buf858, buf866, buf862,
buf868, buf872, buf876, buf884, buf888, buf892, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf869 = torch.ops.aten.max_pool3d_with_indices.default(buf868, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf868
buf870 = buf869[0]
del buf869
buf873 = torch.ops.aten.max_pool3d_with_indices.default(buf872, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf872
buf874 = buf873[0]
del buf873
buf877 = torch.ops.aten.max_pool3d_with_indices.default(buf876, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf876
buf878 = buf877[0]
del buf877
buf880 = buf870
del buf870
triton_poi_fused_minimum_neg_3[grid(256)](buf880, buf878, buf874,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf881 = torch.ops.aten.max_pool3d_with_indices.default(buf880, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf882 = buf881[0]
del buf881
buf885 = torch.ops.aten.max_pool3d_with_indices.default(buf884, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf886 = buf885[0]
del buf885
buf889 = torch.ops.aten.max_pool3d_with_indices.default(buf888, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf890 = buf889[0]
del buf889
buf893 = torch.ops.aten.max_pool3d_with_indices.default(buf892, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf894 = buf893[0]
del buf893
buf896 = buf892
del buf892
buf900 = buf888
del buf888
buf904 = buf884
del buf884
buf912 = buf880
del buf880
buf916 = buf878
del buf878
buf920 = buf874
del buf874
triton_poi_fused_minimum_neg_2[grid(256)](buf886, buf894, buf890,
buf896, buf900, buf904, buf912, buf916, buf920, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf897 = torch.ops.aten.max_pool3d_with_indices.default(buf896, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf896
buf898 = buf897[0]
del buf897
buf901 = torch.ops.aten.max_pool3d_with_indices.default(buf900, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf900
buf902 = buf901[0]
del buf901
buf905 = torch.ops.aten.max_pool3d_with_indices.default(buf904, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf904
buf906 = buf905[0]
del buf905
buf908 = buf898
del buf898
triton_poi_fused_minimum_neg_3[grid(256)](buf908, buf906, buf902,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf909 = torch.ops.aten.max_pool3d_with_indices.default(buf908, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf910 = buf909[0]
del buf909
buf913 = torch.ops.aten.max_pool3d_with_indices.default(buf912, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf914 = buf913[0]
del buf913
buf917 = torch.ops.aten.max_pool3d_with_indices.default(buf916, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf918 = buf917[0]
del buf917
buf921 = torch.ops.aten.max_pool3d_with_indices.default(buf920, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf922 = buf921[0]
del buf921
buf924 = buf920
del buf920
buf928 = buf916
del buf916
buf932 = buf912
del buf912
buf940 = buf908
del buf908
buf944 = buf906
del buf906
buf948 = buf902
del buf902
triton_poi_fused_minimum_neg_2[grid(256)](buf914, buf922, buf918,
buf924, buf928, buf932, buf940, buf944, buf948, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf925 = torch.ops.aten.max_pool3d_with_indices.default(buf924, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf924
buf926 = buf925[0]
del buf925
buf929 = torch.ops.aten.max_pool3d_with_indices.default(buf928, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf928
buf930 = buf929[0]
del buf929
buf933 = torch.ops.aten.max_pool3d_with_indices.default(buf932, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf932
buf934 = buf933[0]
del buf933
buf936 = buf926
del buf926
triton_poi_fused_minimum_neg_3[grid(256)](buf936, buf934, buf930,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf937 = torch.ops.aten.max_pool3d_with_indices.default(buf936, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf938 = buf937[0]
del buf937
buf941 = torch.ops.aten.max_pool3d_with_indices.default(buf940, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf942 = buf941[0]
del buf941
buf945 = torch.ops.aten.max_pool3d_with_indices.default(buf944, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf946 = buf945[0]
del buf945
buf949 = torch.ops.aten.max_pool3d_with_indices.default(buf948, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf950 = buf949[0]
del buf949
buf952 = buf948
del buf948
buf956 = buf944
del buf944
buf960 = buf940
del buf940
buf968 = buf936
del buf936
buf972 = buf934
del buf934
buf976 = buf930
del buf930
triton_poi_fused_minimum_neg_2[grid(256)](buf942, buf950, buf946,
buf952, buf956, buf960, buf968, buf972, buf976, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf953 = torch.ops.aten.max_pool3d_with_indices.default(buf952, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf952
buf954 = buf953[0]
del buf953
buf957 = torch.ops.aten.max_pool3d_with_indices.default(buf956, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf956
buf958 = buf957[0]
del buf957
buf961 = torch.ops.aten.max_pool3d_with_indices.default(buf960, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf960
buf962 = buf961[0]
del buf961
buf964 = buf954
del buf954
triton_poi_fused_minimum_neg_3[grid(256)](buf964, buf962, buf958,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf965 = torch.ops.aten.max_pool3d_with_indices.default(buf964, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf966 = buf965[0]
del buf965
buf969 = torch.ops.aten.max_pool3d_with_indices.default(buf968, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf970 = buf969[0]
del buf969
buf973 = torch.ops.aten.max_pool3d_with_indices.default(buf972, [1,
3, 1], [1, 1, 1], [0, 1, 0])
buf974 = buf973[0]
del buf973
buf977 = torch.ops.aten.max_pool3d_with_indices.default(buf976, [1,
1, 3], [1, 1, 1], [0, 0, 1])
buf978 = buf977[0]
del buf977
buf980 = buf976
del buf976
buf984 = buf972
del buf972
buf988 = buf968
del buf968
buf996 = buf964
del buf964
buf1000 = buf962
del buf962
buf1004 = buf958
del buf958
triton_poi_fused_minimum_neg_2[grid(256)](buf970, buf978, buf974,
buf980, buf984, buf988, buf996, buf1000, buf1004, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf981 = torch.ops.aten.max_pool3d_with_indices.default(buf980, [3,
1, 1], [1, 1, 1], [1, 0, 0])
del buf980
buf982 = buf981[0]
del buf981
buf985 = torch.ops.aten.max_pool3d_with_indices.default(buf984, [1,
3, 1], [1, 1, 1], [0, 1, 0])
del buf984
buf986 = buf985[0]
del buf985
buf989 = torch.ops.aten.max_pool3d_with_indices.default(buf988, [1,
1, 3], [1, 1, 1], [0, 0, 1])
del buf988
buf990 = buf989[0]
del buf989
buf992 = buf982
del buf982
triton_poi_fused_minimum_neg_3[grid(256)](buf992, buf990, buf986,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf993 = torch.ops.aten.max_pool3d_with_indices.default(buf992, [3,
3, 3], [1, 1, 1], [1, 1, 1])
buf994 = buf993[0]
del buf993
buf997 = torch.ops.aten.max_pool3d_with_indices.default(buf996, [3,
1, 1], [1, 1, 1], [1, 0, 0])
buf998 = buf997[0]
del buf997
buf1001 = torch.ops.aten.max_pool3d_with_indices.default(buf1000, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1002 = buf1001[0]
del buf1001
buf1005 = torch.ops.aten.max_pool3d_with_indices.default(buf1004, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1006 = buf1005[0]
del buf1005
buf1008 = buf1004
del buf1004
buf1012 = buf1000
del buf1000
buf1016 = buf996
del buf996
buf1024 = buf992
del buf992
buf1028 = buf990
del buf990
buf1032 = buf986
del buf986
triton_poi_fused_minimum_neg_2[grid(256)](buf998, buf1006, buf1002,
buf1008, buf1012, buf1016, buf1024, buf1028, buf1032, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1009 = torch.ops.aten.max_pool3d_with_indices.default(buf1008, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1008
buf1010 = buf1009[0]
del buf1009
buf1013 = torch.ops.aten.max_pool3d_with_indices.default(buf1012, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1012
buf1014 = buf1013[0]
del buf1013
buf1017 = torch.ops.aten.max_pool3d_with_indices.default(buf1016, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1016
buf1018 = buf1017[0]
del buf1017
buf1020 = buf1010
del buf1010
triton_poi_fused_minimum_neg_3[grid(256)](buf1020, buf1018, buf1014,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1021 = torch.ops.aten.max_pool3d_with_indices.default(buf1020, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1022 = buf1021[0]
del buf1021
buf1025 = torch.ops.aten.max_pool3d_with_indices.default(buf1024, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1026 = buf1025[0]
del buf1025
buf1029 = torch.ops.aten.max_pool3d_with_indices.default(buf1028, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1030 = buf1029[0]
del buf1029
buf1033 = torch.ops.aten.max_pool3d_with_indices.default(buf1032, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1034 = buf1033[0]
del buf1033
buf1036 = buf1032
del buf1032
buf1040 = buf1028
del buf1028
buf1044 = buf1024
del buf1024
buf1052 = buf1020
del buf1020
buf1056 = buf1018
del buf1018
buf1060 = buf1014
del buf1014
triton_poi_fused_minimum_neg_2[grid(256)](buf1026, buf1034, buf1030,
buf1036, buf1040, buf1044, buf1052, buf1056, buf1060, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1037 = torch.ops.aten.max_pool3d_with_indices.default(buf1036, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1036
buf1038 = buf1037[0]
del buf1037
buf1041 = torch.ops.aten.max_pool3d_with_indices.default(buf1040, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1040
buf1042 = buf1041[0]
del buf1041
buf1045 = torch.ops.aten.max_pool3d_with_indices.default(buf1044, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1044
buf1046 = buf1045[0]
del buf1045
buf1048 = buf1038
del buf1038
triton_poi_fused_minimum_neg_3[grid(256)](buf1048, buf1046, buf1042,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1049 = torch.ops.aten.max_pool3d_with_indices.default(buf1048, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1050 = buf1049[0]
del buf1049
buf1053 = torch.ops.aten.max_pool3d_with_indices.default(buf1052, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1054 = buf1053[0]
del buf1053
buf1057 = torch.ops.aten.max_pool3d_with_indices.default(buf1056, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1058 = buf1057[0]
del buf1057
buf1061 = torch.ops.aten.max_pool3d_with_indices.default(buf1060, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1062 = buf1061[0]
del buf1061
buf1064 = buf1060
del buf1060
buf1068 = buf1056
del buf1056
buf1072 = buf1052
del buf1052
buf1080 = buf1048
del buf1048
buf1084 = buf1046
del buf1046
buf1088 = buf1042
del buf1042
triton_poi_fused_minimum_neg_2[grid(256)](buf1054, buf1062, buf1058,
buf1064, buf1068, buf1072, buf1080, buf1084, buf1088, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1065 = torch.ops.aten.max_pool3d_with_indices.default(buf1064, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1064
buf1066 = buf1065[0]
del buf1065
buf1069 = torch.ops.aten.max_pool3d_with_indices.default(buf1068, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1068
buf1070 = buf1069[0]
del buf1069
buf1073 = torch.ops.aten.max_pool3d_with_indices.default(buf1072, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1072
buf1074 = buf1073[0]
del buf1073
buf1076 = buf1066
del buf1066
triton_poi_fused_minimum_neg_3[grid(256)](buf1076, buf1074, buf1070,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1077 = torch.ops.aten.max_pool3d_with_indices.default(buf1076, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1078 = buf1077[0]
del buf1077
buf1081 = torch.ops.aten.max_pool3d_with_indices.default(buf1080, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1082 = buf1081[0]
del buf1081
buf1085 = torch.ops.aten.max_pool3d_with_indices.default(buf1084, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1086 = buf1085[0]
del buf1085
buf1089 = torch.ops.aten.max_pool3d_with_indices.default(buf1088, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1090 = buf1089[0]
del buf1089
buf1092 = buf1088
del buf1088
buf1096 = buf1084
del buf1084
buf1100 = buf1080
del buf1080
buf1108 = buf1076
del buf1076
buf1112 = buf1074
del buf1074
buf1116 = buf1070
del buf1070
triton_poi_fused_minimum_neg_2[grid(256)](buf1082, buf1090, buf1086,
buf1092, buf1096, buf1100, buf1108, buf1112, buf1116, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1093 = torch.ops.aten.max_pool3d_with_indices.default(buf1092, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1092
buf1094 = buf1093[0]
del buf1093
buf1097 = torch.ops.aten.max_pool3d_with_indices.default(buf1096, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1096
buf1098 = buf1097[0]
del buf1097
buf1101 = torch.ops.aten.max_pool3d_with_indices.default(buf1100, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1100
buf1102 = buf1101[0]
del buf1101
buf1104 = buf1094
del buf1094
triton_poi_fused_minimum_neg_3[grid(256)](buf1104, buf1102, buf1098,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1105 = torch.ops.aten.max_pool3d_with_indices.default(buf1104, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1106 = buf1105[0]
del buf1105
buf1109 = torch.ops.aten.max_pool3d_with_indices.default(buf1108, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1110 = buf1109[0]
del buf1109
buf1113 = torch.ops.aten.max_pool3d_with_indices.default(buf1112, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1114 = buf1113[0]
del buf1113
buf1117 = torch.ops.aten.max_pool3d_with_indices.default(buf1116, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1118 = buf1117[0]
del buf1117
buf1120 = buf1116
del buf1116
buf1124 = buf1112
del buf1112
buf1128 = buf1108
del buf1108
buf1136 = buf1104
del buf1104
buf1140 = buf1102
del buf1102
buf1144 = buf1098
del buf1098
triton_poi_fused_minimum_neg_2[grid(256)](buf1110, buf1118, buf1114,
buf1120, buf1124, buf1128, buf1136, buf1140, buf1144, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1121 = torch.ops.aten.max_pool3d_with_indices.default(buf1120, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1120
buf1122 = buf1121[0]
del buf1121
buf1125 = torch.ops.aten.max_pool3d_with_indices.default(buf1124, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1124
buf1126 = buf1125[0]
del buf1125
buf1129 = torch.ops.aten.max_pool3d_with_indices.default(buf1128, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1128
buf1130 = buf1129[0]
del buf1129
buf1132 = buf1122
del buf1122
triton_poi_fused_minimum_neg_3[grid(256)](buf1132, buf1130, buf1126,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1133 = torch.ops.aten.max_pool3d_with_indices.default(buf1132, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1134 = buf1133[0]
del buf1133
buf1137 = torch.ops.aten.max_pool3d_with_indices.default(buf1136, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1138 = buf1137[0]
del buf1137
buf1141 = torch.ops.aten.max_pool3d_with_indices.default(buf1140, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1142 = buf1141[0]
del buf1141
buf1145 = torch.ops.aten.max_pool3d_with_indices.default(buf1144, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1146 = buf1145[0]
del buf1145
buf1148 = buf1144
del buf1144
buf1152 = buf1140
del buf1140
buf1156 = buf1136
del buf1136
buf1164 = buf1132
del buf1132
buf1168 = buf1130
del buf1130
buf1172 = buf1126
del buf1126
triton_poi_fused_minimum_neg_2[grid(256)](buf1138, buf1146, buf1142,
buf1148, buf1152, buf1156, buf1164, buf1168, buf1172, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1149 = torch.ops.aten.max_pool3d_with_indices.default(buf1148, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1148
buf1150 = buf1149[0]
del buf1149
buf1153 = torch.ops.aten.max_pool3d_with_indices.default(buf1152, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1152
buf1154 = buf1153[0]
del buf1153
buf1157 = torch.ops.aten.max_pool3d_with_indices.default(buf1156, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1156
buf1158 = buf1157[0]
del buf1157
buf1160 = buf1150
del buf1150
triton_poi_fused_minimum_neg_3[grid(256)](buf1160, buf1158, buf1154,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1161 = torch.ops.aten.max_pool3d_with_indices.default(buf1160, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1162 = buf1161[0]
del buf1161
buf1165 = torch.ops.aten.max_pool3d_with_indices.default(buf1164, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1166 = buf1165[0]
del buf1165
buf1169 = torch.ops.aten.max_pool3d_with_indices.default(buf1168, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1170 = buf1169[0]
del buf1169
buf1173 = torch.ops.aten.max_pool3d_with_indices.default(buf1172, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1174 = buf1173[0]
del buf1173
buf1176 = buf1172
del buf1172
buf1180 = buf1168
del buf1168
buf1184 = buf1164
del buf1164
buf1192 = buf1160
del buf1160
buf1196 = buf1158
del buf1158
buf1200 = buf1154
del buf1154
triton_poi_fused_minimum_neg_2[grid(256)](buf1166, buf1174, buf1170,
buf1176, buf1180, buf1184, buf1192, buf1196, buf1200, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1177 = torch.ops.aten.max_pool3d_with_indices.default(buf1176, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1176
buf1178 = buf1177[0]
del buf1177
buf1181 = torch.ops.aten.max_pool3d_with_indices.default(buf1180, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1180
buf1182 = buf1181[0]
del buf1181
buf1185 = torch.ops.aten.max_pool3d_with_indices.default(buf1184, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1184
buf1186 = buf1185[0]
del buf1185
buf1188 = buf1178
del buf1178
triton_poi_fused_minimum_neg_3[grid(256)](buf1188, buf1186, buf1182,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1189 = torch.ops.aten.max_pool3d_with_indices.default(buf1188, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1190 = buf1189[0]
del buf1189
buf1193 = torch.ops.aten.max_pool3d_with_indices.default(buf1192, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1194 = buf1193[0]
del buf1193
buf1197 = torch.ops.aten.max_pool3d_with_indices.default(buf1196, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1198 = buf1197[0]
del buf1197
buf1201 = torch.ops.aten.max_pool3d_with_indices.default(buf1200, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1202 = buf1201[0]
del buf1201
buf1204 = buf1200
del buf1200
buf1208 = buf1196
del buf1196
buf1212 = buf1192
del buf1192
buf1220 = buf1188
del buf1188
buf1224 = buf1186
del buf1186
buf1228 = buf1182
del buf1182
triton_poi_fused_minimum_neg_2[grid(256)](buf1194, buf1202, buf1198,
buf1204, buf1208, buf1212, buf1220, buf1224, buf1228, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1205 = torch.ops.aten.max_pool3d_with_indices.default(buf1204, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1204
buf1206 = buf1205[0]
del buf1205
buf1209 = torch.ops.aten.max_pool3d_with_indices.default(buf1208, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1208
buf1210 = buf1209[0]
del buf1209
buf1213 = torch.ops.aten.max_pool3d_with_indices.default(buf1212, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1212
buf1214 = buf1213[0]
del buf1213
buf1216 = buf1206
del buf1206
triton_poi_fused_minimum_neg_3[grid(256)](buf1216, buf1214, buf1210,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1217 = torch.ops.aten.max_pool3d_with_indices.default(buf1216, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1218 = buf1217[0]
del buf1217
buf1221 = torch.ops.aten.max_pool3d_with_indices.default(buf1220, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1222 = buf1221[0]
del buf1221
buf1225 = torch.ops.aten.max_pool3d_with_indices.default(buf1224, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1226 = buf1225[0]
del buf1225
buf1229 = torch.ops.aten.max_pool3d_with_indices.default(buf1228, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1230 = buf1229[0]
del buf1229
buf1232 = buf1228
del buf1228
buf1236 = buf1224
del buf1224
buf1240 = buf1220
del buf1220
buf1248 = buf1216
del buf1216
buf1252 = buf1214
del buf1214
buf1256 = buf1210
del buf1210
triton_poi_fused_minimum_neg_2[grid(256)](buf1222, buf1230, buf1226,
buf1232, buf1236, buf1240, buf1248, buf1252, buf1256, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1233 = torch.ops.aten.max_pool3d_with_indices.default(buf1232, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1232
buf1234 = buf1233[0]
del buf1233
buf1237 = torch.ops.aten.max_pool3d_with_indices.default(buf1236, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1236
buf1238 = buf1237[0]
del buf1237
buf1241 = torch.ops.aten.max_pool3d_with_indices.default(buf1240, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1240
buf1242 = buf1241[0]
del buf1241
buf1244 = buf1234
del buf1234
triton_poi_fused_minimum_neg_3[grid(256)](buf1244, buf1242, buf1238,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1245 = torch.ops.aten.max_pool3d_with_indices.default(buf1244, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1246 = buf1245[0]
del buf1245
buf1249 = torch.ops.aten.max_pool3d_with_indices.default(buf1248, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1250 = buf1249[0]
del buf1249
buf1253 = torch.ops.aten.max_pool3d_with_indices.default(buf1252, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1254 = buf1253[0]
del buf1253
buf1257 = torch.ops.aten.max_pool3d_with_indices.default(buf1256, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1258 = buf1257[0]
del buf1257
buf1260 = buf1256
del buf1256
buf1264 = buf1252
del buf1252
buf1268 = buf1248
del buf1248
buf1276 = buf1244
del buf1244
buf1280 = buf1242
del buf1242
buf1284 = buf1238
del buf1238
triton_poi_fused_minimum_neg_2[grid(256)](buf1250, buf1258, buf1254,
buf1260, buf1264, buf1268, buf1276, buf1280, buf1284, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1261 = torch.ops.aten.max_pool3d_with_indices.default(buf1260, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1260
buf1262 = buf1261[0]
del buf1261
buf1265 = torch.ops.aten.max_pool3d_with_indices.default(buf1264, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1264
buf1266 = buf1265[0]
del buf1265
buf1269 = torch.ops.aten.max_pool3d_with_indices.default(buf1268, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1268
buf1270 = buf1269[0]
del buf1269
buf1272 = buf1262
del buf1262
triton_poi_fused_minimum_neg_3[grid(256)](buf1272, buf1270, buf1266,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1273 = torch.ops.aten.max_pool3d_with_indices.default(buf1272, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1274 = buf1273[0]
del buf1273
buf1277 = torch.ops.aten.max_pool3d_with_indices.default(buf1276, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1278 = buf1277[0]
del buf1277
buf1281 = torch.ops.aten.max_pool3d_with_indices.default(buf1280, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1282 = buf1281[0]
del buf1281
buf1285 = torch.ops.aten.max_pool3d_with_indices.default(buf1284, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1286 = buf1285[0]
del buf1285
buf1288 = buf1284
del buf1284
buf1292 = buf1280
del buf1280
buf1296 = buf1276
del buf1276
buf1304 = buf1272
del buf1272
buf1308 = buf1270
del buf1270
buf1312 = buf1266
del buf1266
triton_poi_fused_minimum_neg_2[grid(256)](buf1278, buf1286, buf1282,
buf1288, buf1292, buf1296, buf1304, buf1308, buf1312, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1289 = torch.ops.aten.max_pool3d_with_indices.default(buf1288, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1288
buf1290 = buf1289[0]
del buf1289
buf1293 = torch.ops.aten.max_pool3d_with_indices.default(buf1292, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1292
buf1294 = buf1293[0]
del buf1293
buf1297 = torch.ops.aten.max_pool3d_with_indices.default(buf1296, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1296
buf1298 = buf1297[0]
del buf1297
buf1300 = buf1290
del buf1290
triton_poi_fused_minimum_neg_3[grid(256)](buf1300, buf1298, buf1294,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1301 = torch.ops.aten.max_pool3d_with_indices.default(buf1300, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1302 = buf1301[0]
del buf1301
buf1305 = torch.ops.aten.max_pool3d_with_indices.default(buf1304, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1306 = buf1305[0]
del buf1305
buf1309 = torch.ops.aten.max_pool3d_with_indices.default(buf1308, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1310 = buf1309[0]
del buf1309
buf1313 = torch.ops.aten.max_pool3d_with_indices.default(buf1312, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1314 = buf1313[0]
del buf1313
buf1316 = buf1312
del buf1312
buf1320 = buf1308
del buf1308
buf1324 = buf1304
del buf1304
buf1332 = buf1300
del buf1300
buf1336 = buf1298
del buf1298
buf1340 = buf1294
del buf1294
triton_poi_fused_minimum_neg_2[grid(256)](buf1306, buf1314, buf1310,
buf1316, buf1320, buf1324, buf1332, buf1336, buf1340, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1317 = torch.ops.aten.max_pool3d_with_indices.default(buf1316, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1316
buf1318 = buf1317[0]
del buf1317
buf1321 = torch.ops.aten.max_pool3d_with_indices.default(buf1320, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1320
buf1322 = buf1321[0]
del buf1321
buf1325 = torch.ops.aten.max_pool3d_with_indices.default(buf1324, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1324
buf1326 = buf1325[0]
del buf1325
buf1328 = buf1318
del buf1318
triton_poi_fused_minimum_neg_3[grid(256)](buf1328, buf1326, buf1322,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1329 = torch.ops.aten.max_pool3d_with_indices.default(buf1328, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1330 = buf1329[0]
del buf1329
buf1333 = torch.ops.aten.max_pool3d_with_indices.default(buf1332, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1334 = buf1333[0]
del buf1333
buf1337 = torch.ops.aten.max_pool3d_with_indices.default(buf1336, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1338 = buf1337[0]
del buf1337
buf1341 = torch.ops.aten.max_pool3d_with_indices.default(buf1340, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1342 = buf1341[0]
del buf1341
buf1344 = buf1340
del buf1340
buf1348 = buf1336
del buf1336
buf1352 = buf1332
del buf1332
buf1360 = buf1328
del buf1328
buf1364 = buf1326
del buf1326
buf1368 = buf1322
del buf1322
triton_poi_fused_minimum_neg_2[grid(256)](buf1334, buf1342, buf1338,
buf1344, buf1348, buf1352, buf1360, buf1364, buf1368, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1345 = torch.ops.aten.max_pool3d_with_indices.default(buf1344, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1344
buf1346 = buf1345[0]
del buf1345
buf1349 = torch.ops.aten.max_pool3d_with_indices.default(buf1348, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1348
buf1350 = buf1349[0]
del buf1349
buf1353 = torch.ops.aten.max_pool3d_with_indices.default(buf1352, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1352
buf1354 = buf1353[0]
del buf1353
buf1356 = buf1346
del buf1346
triton_poi_fused_minimum_neg_3[grid(256)](buf1356, buf1354, buf1350,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1357 = torch.ops.aten.max_pool3d_with_indices.default(buf1356, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1358 = buf1357[0]
del buf1357
buf1361 = torch.ops.aten.max_pool3d_with_indices.default(buf1360, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1362 = buf1361[0]
del buf1361
buf1365 = torch.ops.aten.max_pool3d_with_indices.default(buf1364, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1366 = buf1365[0]
del buf1365
buf1369 = torch.ops.aten.max_pool3d_with_indices.default(buf1368, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1370 = buf1369[0]
del buf1369
buf1372 = buf1368
del buf1368
buf1376 = buf1364
del buf1364
buf1380 = buf1360
del buf1360
buf1388 = buf1356
del buf1356
buf1392 = buf1354
del buf1354
buf1396 = buf1350
del buf1350
triton_poi_fused_minimum_neg_2[grid(256)](buf1362, buf1370, buf1366,
buf1372, buf1376, buf1380, buf1388, buf1392, buf1396, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1373 = torch.ops.aten.max_pool3d_with_indices.default(buf1372, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1372
buf1374 = buf1373[0]
del buf1373
buf1377 = torch.ops.aten.max_pool3d_with_indices.default(buf1376, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1376
buf1378 = buf1377[0]
del buf1377
buf1381 = torch.ops.aten.max_pool3d_with_indices.default(buf1380, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1380
buf1382 = buf1381[0]
del buf1381
buf1384 = buf1374
del buf1374
triton_poi_fused_minimum_neg_3[grid(256)](buf1384, buf1382, buf1378,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1378
del buf1382
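# buf1378/buf1382 are freed immediately here: this chain terminates at the
# [3,3,3] pool below rather than being recycled into a following round.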
buf1385 = torch.ops.aten.max_pool3d_with_indices.default(buf1384, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf1384
buf1386 = buf1385[0]
del buf1385
buf1389 = torch.ops.aten.max_pool3d_with_indices.default(buf1388, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1390 = buf1389[0]
del buf1389
buf1393 = torch.ops.aten.max_pool3d_with_indices.default(buf1392, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1394 = buf1393[0]
del buf1393
buf1397 = torch.ops.aten.max_pool3d_with_indices.default(buf1396, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1398 = buf1397[0]
del buf1397
buf1400 = buf1396
del buf1396
buf1404 = buf1392
del buf1392
buf1408 = buf1388
del buf1388
triton_poi_fused_minimum_neg_4[grid(256)](buf1390, buf1398, buf1394,
buf1400, buf1404, buf1408, 256, XBLOCK=256, num_warps=4,
num_stages=1)
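# The `_4` variant carries only six outputs instead of nine: this round
# appears to close out the current input's chains before a new sequence is
# seeded from graph input arg1_1 below.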
buf1401 = torch.ops.aten.max_pool3d_with_indices.default(buf1400, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1402 = buf1401[0]
del buf1401
buf1405 = torch.ops.aten.max_pool3d_with_indices.default(buf1404, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1406 = buf1405[0]
del buf1405
buf1409 = torch.ops.aten.max_pool3d_with_indices.default(buf1408, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1410 = buf1409[0]
del buf1409
buf1412 = buf1402
del buf1402
triton_poi_fused_minimum_neg_3[grid(256)](buf1412, buf1410, buf1406,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1413 = torch.ops.aten.max_pool3d_with_indices.default(buf1412, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1414 = buf1413[0]
del buf1413
buf1416 = buf1412
del buf1412
buf1420 = buf1410
del buf1410
buf1424 = buf1406
del buf1406
buf1432 = buf1408
del buf1408
buf1436 = buf1404
del buf1404
buf1440 = buf1400
del buf1400
triton_poi_fused_neg_0[grid(256)](arg1_1, buf1416, buf1420, buf1424,
buf1432, buf1436, buf1440, 256, XBLOCK=128, num_warps=4,
num_stages=1)
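# `neg_0` broadcasts (presumably the negation of) graph input arg1_1 into six
# fresh working buffers, seeding the same erode/dilate rounds for this input.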
buf1417 = torch.ops.aten.max_pool3d_with_indices.default(buf1416, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1416
buf1418 = buf1417[0]
del buf1417
buf1421 = torch.ops.aten.max_pool3d_with_indices.default(buf1420, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1420
buf1422 = buf1421[0]
del buf1421
buf1425 = torch.ops.aten.max_pool3d_with_indices.default(buf1424, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1424
buf1426 = buf1425[0]
del buf1425
buf1428 = buf1418
del buf1418
triton_poi_fused_minimum_neg_3[grid(256)](buf1428, buf1426, buf1422,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1429 = torch.ops.aten.max_pool3d_with_indices.default(buf1428, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1430 = buf1429[0]
del buf1429
buf1433 = torch.ops.aten.max_pool3d_with_indices.default(buf1432, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1434 = buf1433[0]
del buf1433
buf1437 = torch.ops.aten.max_pool3d_with_indices.default(buf1436, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1438 = buf1437[0]
del buf1437
buf1441 = torch.ops.aten.max_pool3d_with_indices.default(buf1440, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1442 = buf1441[0]
del buf1441
buf1444 = buf1440
del buf1440
buf1448 = buf1436
del buf1436
buf1452 = buf1432
del buf1432
buf1460 = buf1428
del buf1428
buf1464 = buf1426
del buf1426
buf1468 = buf1422
del buf1422
triton_poi_fused_minimum_neg_2[grid(256)](buf1434, buf1442, buf1438,
buf1444, buf1448, buf1452, buf1460, buf1464, buf1468, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1445 = torch.ops.aten.max_pool3d_with_indices.default(buf1444, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1444
buf1446 = buf1445[0]
del buf1445
buf1449 = torch.ops.aten.max_pool3d_with_indices.default(buf1448, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1448
buf1450 = buf1449[0]
del buf1449
buf1453 = torch.ops.aten.max_pool3d_with_indices.default(buf1452, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1452
buf1454 = buf1453[0]
del buf1453
buf1456 = buf1446
del buf1446
triton_poi_fused_minimum_neg_3[grid(256)](buf1456, buf1454, buf1450,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1457 = torch.ops.aten.max_pool3d_with_indices.default(buf1456, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1458 = buf1457[0]
del buf1457
buf1461 = torch.ops.aten.max_pool3d_with_indices.default(buf1460, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1462 = buf1461[0]
del buf1461
buf1465 = torch.ops.aten.max_pool3d_with_indices.default(buf1464, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1466 = buf1465[0]
del buf1465
buf1469 = torch.ops.aten.max_pool3d_with_indices.default(buf1468, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1470 = buf1469[0]
del buf1469
buf1472 = buf1468
del buf1468
buf1476 = buf1464
del buf1464
buf1480 = buf1460
del buf1460
buf1488 = buf1456
del buf1456
buf1492 = buf1454
del buf1454
buf1496 = buf1450
del buf1450
triton_poi_fused_minimum_neg_2[grid(256)](buf1462, buf1470, buf1466,
buf1472, buf1476, buf1480, buf1488, buf1492, buf1496, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1473 = torch.ops.aten.max_pool3d_with_indices.default(buf1472, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1472
buf1474 = buf1473[0]
del buf1473
buf1477 = torch.ops.aten.max_pool3d_with_indices.default(buf1476, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1476
buf1478 = buf1477[0]
del buf1477
buf1481 = torch.ops.aten.max_pool3d_with_indices.default(buf1480, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1480
buf1482 = buf1481[0]
del buf1481
buf1484 = buf1474
del buf1474
triton_poi_fused_minimum_neg_3[grid(256)](buf1484, buf1482, buf1478,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1485 = torch.ops.aten.max_pool3d_with_indices.default(buf1484, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1486 = buf1485[0]
del buf1485
buf1489 = torch.ops.aten.max_pool3d_with_indices.default(buf1488, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1490 = buf1489[0]
del buf1489
buf1493 = torch.ops.aten.max_pool3d_with_indices.default(buf1492, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1494 = buf1493[0]
del buf1493
buf1497 = torch.ops.aten.max_pool3d_with_indices.default(buf1496, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1498 = buf1497[0]
del buf1497
buf1500 = buf1496
del buf1496
buf1504 = buf1492
del buf1492
buf1508 = buf1488
del buf1488
buf1516 = buf1484
del buf1484
buf1520 = buf1482
del buf1482
buf1524 = buf1478
del buf1478
triton_poi_fused_minimum_neg_2[grid(256)](buf1490, buf1498, buf1494,
buf1500, buf1504, buf1508, buf1516, buf1520, buf1524, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1501 = torch.ops.aten.max_pool3d_with_indices.default(buf1500, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1500
buf1502 = buf1501[0]
del buf1501
buf1505 = torch.ops.aten.max_pool3d_with_indices.default(buf1504, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1504
buf1506 = buf1505[0]
del buf1505
buf1509 = torch.ops.aten.max_pool3d_with_indices.default(buf1508, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1508
buf1510 = buf1509[0]
del buf1509
buf1512 = buf1502
del buf1502
triton_poi_fused_minimum_neg_3[grid(256)](buf1512, buf1510, buf1506,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1513 = torch.ops.aten.max_pool3d_with_indices.default(buf1512, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1514 = buf1513[0]
del buf1513
buf1517 = torch.ops.aten.max_pool3d_with_indices.default(buf1516, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1518 = buf1517[0]
del buf1517
buf1521 = torch.ops.aten.max_pool3d_with_indices.default(buf1520, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1522 = buf1521[0]
del buf1521
buf1525 = torch.ops.aten.max_pool3d_with_indices.default(buf1524, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1526 = buf1525[0]
del buf1525
buf1528 = buf1524
del buf1524
buf1532 = buf1520
del buf1520
buf1536 = buf1516
del buf1516
buf1544 = buf1512
del buf1512
buf1548 = buf1510
del buf1510
buf1552 = buf1506
del buf1506
triton_poi_fused_minimum_neg_2[grid(256)](buf1518, buf1526, buf1522,
buf1528, buf1532, buf1536, buf1544, buf1548, buf1552, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1529 = torch.ops.aten.max_pool3d_with_indices.default(buf1528, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1528
buf1530 = buf1529[0]
del buf1529
buf1533 = torch.ops.aten.max_pool3d_with_indices.default(buf1532, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1532
buf1534 = buf1533[0]
del buf1533
buf1537 = torch.ops.aten.max_pool3d_with_indices.default(buf1536, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1536
buf1538 = buf1537[0]
del buf1537
buf1540 = buf1530
del buf1530
triton_poi_fused_minimum_neg_3[grid(256)](buf1540, buf1538, buf1534,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1541 = torch.ops.aten.max_pool3d_with_indices.default(buf1540, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1542 = buf1541[0]
del buf1541
buf1545 = torch.ops.aten.max_pool3d_with_indices.default(buf1544, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1546 = buf1545[0]
del buf1545
buf1549 = torch.ops.aten.max_pool3d_with_indices.default(buf1548, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1550 = buf1549[0]
del buf1549
buf1553 = torch.ops.aten.max_pool3d_with_indices.default(buf1552, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1554 = buf1553[0]
del buf1553
buf1556 = buf1552
del buf1552
buf1560 = buf1548
del buf1548
buf1564 = buf1544
del buf1544
buf1572 = buf1540
del buf1540
buf1576 = buf1538
del buf1538
buf1580 = buf1534
del buf1534
triton_poi_fused_minimum_neg_2[grid(256)](buf1546, buf1554, buf1550,
buf1556, buf1560, buf1564, buf1572, buf1576, buf1580, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1557 = torch.ops.aten.max_pool3d_with_indices.default(buf1556, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1556
buf1558 = buf1557[0]
del buf1557
buf1561 = torch.ops.aten.max_pool3d_with_indices.default(buf1560, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1560
buf1562 = buf1561[0]
del buf1561
buf1565 = torch.ops.aten.max_pool3d_with_indices.default(buf1564, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1564
buf1566 = buf1565[0]
del buf1565
buf1568 = buf1558
del buf1558
triton_poi_fused_minimum_neg_3[grid(256)](buf1568, buf1566, buf1562,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1569 = torch.ops.aten.max_pool3d_with_indices.default(buf1568, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1570 = buf1569[0]
del buf1569
buf1573 = torch.ops.aten.max_pool3d_with_indices.default(buf1572, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1574 = buf1573[0]
del buf1573
buf1577 = torch.ops.aten.max_pool3d_with_indices.default(buf1576, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1578 = buf1577[0]
del buf1577
buf1581 = torch.ops.aten.max_pool3d_with_indices.default(buf1580, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1582 = buf1581[0]
del buf1581
buf1584 = buf1580
del buf1580
buf1588 = buf1576
del buf1576
buf1592 = buf1572
del buf1572
buf1600 = buf1568
del buf1568
buf1604 = buf1566
del buf1566
buf1608 = buf1562
del buf1562
triton_poi_fused_minimum_neg_2[grid(256)](buf1574, buf1582, buf1578,
buf1584, buf1588, buf1592, buf1600, buf1604, buf1608, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1585 = torch.ops.aten.max_pool3d_with_indices.default(buf1584, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1584
buf1586 = buf1585[0]
del buf1585
buf1589 = torch.ops.aten.max_pool3d_with_indices.default(buf1588, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1588
buf1590 = buf1589[0]
del buf1589
buf1593 = torch.ops.aten.max_pool3d_with_indices.default(buf1592, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1592
buf1594 = buf1593[0]
del buf1593
buf1596 = buf1586
del buf1586
triton_poi_fused_minimum_neg_3[grid(256)](buf1596, buf1594, buf1590,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1597 = torch.ops.aten.max_pool3d_with_indices.default(buf1596, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1598 = buf1597[0]
del buf1597
buf1601 = torch.ops.aten.max_pool3d_with_indices.default(buf1600, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1602 = buf1601[0]
del buf1601
buf1605 = torch.ops.aten.max_pool3d_with_indices.default(buf1604, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1606 = buf1605[0]
del buf1605
buf1609 = torch.ops.aten.max_pool3d_with_indices.default(buf1608, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1610 = buf1609[0]
del buf1609
buf1612 = buf1608
del buf1608
buf1616 = buf1604
del buf1604
buf1620 = buf1600
del buf1600
buf1628 = buf1596
del buf1596
buf1632 = buf1594
del buf1594
buf1636 = buf1590
del buf1590
triton_poi_fused_minimum_neg_2[grid(256)](buf1602, buf1610, buf1606,
buf1612, buf1616, buf1620, buf1628, buf1632, buf1636, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1613 = torch.ops.aten.max_pool3d_with_indices.default(buf1612, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1612
buf1614 = buf1613[0]
del buf1613
buf1617 = torch.ops.aten.max_pool3d_with_indices.default(buf1616, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1616
buf1618 = buf1617[0]
del buf1617
buf1621 = torch.ops.aten.max_pool3d_with_indices.default(buf1620, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1620
buf1622 = buf1621[0]
del buf1621
buf1624 = buf1614
del buf1614
triton_poi_fused_minimum_neg_3[grid(256)](buf1624, buf1622, buf1618,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1625 = torch.ops.aten.max_pool3d_with_indices.default(buf1624, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1626 = buf1625[0]
del buf1625
buf1629 = torch.ops.aten.max_pool3d_with_indices.default(buf1628, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1630 = buf1629[0]
del buf1629
buf1633 = torch.ops.aten.max_pool3d_with_indices.default(buf1632, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1634 = buf1633[0]
del buf1633
buf1637 = torch.ops.aten.max_pool3d_with_indices.default(buf1636, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1638 = buf1637[0]
del buf1637
buf1640 = buf1636
del buf1636
buf1644 = buf1632
del buf1632
buf1648 = buf1628
del buf1628
buf1656 = buf1624
del buf1624
buf1660 = buf1622
del buf1622
buf1664 = buf1618
del buf1618
triton_poi_fused_minimum_neg_2[grid(256)](buf1630, buf1638, buf1634,
buf1640, buf1644, buf1648, buf1656, buf1660, buf1664, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1641 = torch.ops.aten.max_pool3d_with_indices.default(buf1640, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1640
buf1642 = buf1641[0]
del buf1641
buf1645 = torch.ops.aten.max_pool3d_with_indices.default(buf1644, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1644
buf1646 = buf1645[0]
del buf1645
buf1649 = torch.ops.aten.max_pool3d_with_indices.default(buf1648, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1648
buf1650 = buf1649[0]
del buf1649
buf1652 = buf1642
del buf1642
triton_poi_fused_minimum_neg_3[grid(256)](buf1652, buf1650, buf1646,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1653 = torch.ops.aten.max_pool3d_with_indices.default(buf1652, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1654 = buf1653[0]
del buf1653
buf1657 = torch.ops.aten.max_pool3d_with_indices.default(buf1656, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1658 = buf1657[0]
del buf1657
buf1661 = torch.ops.aten.max_pool3d_with_indices.default(buf1660, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1662 = buf1661[0]
del buf1661
buf1665 = torch.ops.aten.max_pool3d_with_indices.default(buf1664, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1666 = buf1665[0]
del buf1665
buf1668 = buf1664
del buf1664
buf1672 = buf1660
del buf1660
buf1676 = buf1656
del buf1656
buf1684 = buf1652
del buf1652
buf1688 = buf1650
del buf1650
buf1692 = buf1646
del buf1646
triton_poi_fused_minimum_neg_2[grid(256)](buf1658, buf1666, buf1662,
buf1668, buf1672, buf1676, buf1684, buf1688, buf1692, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1669 = torch.ops.aten.max_pool3d_with_indices.default(buf1668, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1668
buf1670 = buf1669[0]
del buf1669
buf1673 = torch.ops.aten.max_pool3d_with_indices.default(buf1672, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1672
buf1674 = buf1673[0]
del buf1673
buf1677 = torch.ops.aten.max_pool3d_with_indices.default(buf1676, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1676
buf1678 = buf1677[0]
del buf1677
buf1680 = buf1670
del buf1670
triton_poi_fused_minimum_neg_3[grid(256)](buf1680, buf1678, buf1674,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1681 = torch.ops.aten.max_pool3d_with_indices.default(buf1680, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1682 = buf1681[0]
del buf1681
buf1685 = torch.ops.aten.max_pool3d_with_indices.default(buf1684, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1686 = buf1685[0]
del buf1685
buf1689 = torch.ops.aten.max_pool3d_with_indices.default(buf1688, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1690 = buf1689[0]
del buf1689
buf1693 = torch.ops.aten.max_pool3d_with_indices.default(buf1692, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1694 = buf1693[0]
del buf1693
buf1696 = buf1692
del buf1692
buf1700 = buf1688
del buf1688
buf1704 = buf1684
del buf1684
buf1712 = buf1680
del buf1680
buf1716 = buf1678
del buf1678
buf1720 = buf1674
del buf1674
triton_poi_fused_minimum_neg_2[grid(256)](buf1686, buf1694, buf1690,
buf1696, buf1700, buf1704, buf1712, buf1716, buf1720, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1697 = torch.ops.aten.max_pool3d_with_indices.default(buf1696, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1696
buf1698 = buf1697[0]
del buf1697
buf1701 = torch.ops.aten.max_pool3d_with_indices.default(buf1700, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1700
buf1702 = buf1701[0]
del buf1701
buf1705 = torch.ops.aten.max_pool3d_with_indices.default(buf1704, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1704
buf1706 = buf1705[0]
del buf1705
buf1708 = buf1698
del buf1698
triton_poi_fused_minimum_neg_3[grid(256)](buf1708, buf1706, buf1702,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1709 = torch.ops.aten.max_pool3d_with_indices.default(buf1708, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1710 = buf1709[0]
del buf1709
buf1713 = torch.ops.aten.max_pool3d_with_indices.default(buf1712, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1714 = buf1713[0]
del buf1713
buf1717 = torch.ops.aten.max_pool3d_with_indices.default(buf1716, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1718 = buf1717[0]
del buf1717
buf1721 = torch.ops.aten.max_pool3d_with_indices.default(buf1720, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1722 = buf1721[0]
del buf1721
buf1724 = buf1720
del buf1720
buf1728 = buf1716
del buf1716
buf1732 = buf1712
del buf1712
buf1740 = buf1708
del buf1708
buf1744 = buf1706
del buf1706
buf1748 = buf1702
del buf1702
triton_poi_fused_minimum_neg_2[grid(256)](buf1714, buf1722, buf1718,
buf1724, buf1728, buf1732, buf1740, buf1744, buf1748, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1725 = torch.ops.aten.max_pool3d_with_indices.default(buf1724, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1724
buf1726 = buf1725[0]
del buf1725
buf1729 = torch.ops.aten.max_pool3d_with_indices.default(buf1728, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1728
buf1730 = buf1729[0]
del buf1729
buf1733 = torch.ops.aten.max_pool3d_with_indices.default(buf1732, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1732
buf1734 = buf1733[0]
del buf1733
buf1736 = buf1726
del buf1726
triton_poi_fused_minimum_neg_3[grid(256)](buf1736, buf1734, buf1730,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1737 = torch.ops.aten.max_pool3d_with_indices.default(buf1736, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1738 = buf1737[0]
del buf1737
buf1741 = torch.ops.aten.max_pool3d_with_indices.default(buf1740, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1742 = buf1741[0]
del buf1741
buf1745 = torch.ops.aten.max_pool3d_with_indices.default(buf1744, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1746 = buf1745[0]
del buf1745
buf1749 = torch.ops.aten.max_pool3d_with_indices.default(buf1748, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1750 = buf1749[0]
del buf1749
buf1752 = buf1748
del buf1748
buf1756 = buf1744
del buf1744
buf1760 = buf1740
del buf1740
buf1768 = buf1736
del buf1736
buf1772 = buf1734
del buf1734
buf1776 = buf1730
del buf1730
triton_poi_fused_minimum_neg_2[grid(256)](buf1742, buf1750, buf1746,
buf1752, buf1756, buf1760, buf1768, buf1772, buf1776, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1753 = torch.ops.aten.max_pool3d_with_indices.default(buf1752, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1752
buf1754 = buf1753[0]
del buf1753
buf1757 = torch.ops.aten.max_pool3d_with_indices.default(buf1756, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1756
buf1758 = buf1757[0]
del buf1757
buf1761 = torch.ops.aten.max_pool3d_with_indices.default(buf1760, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1760
buf1762 = buf1761[0]
del buf1761
buf1764 = buf1754
del buf1754
triton_poi_fused_minimum_neg_3[grid(256)](buf1764, buf1762, buf1758,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1765 = torch.ops.aten.max_pool3d_with_indices.default(buf1764, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1766 = buf1765[0]
del buf1765
buf1769 = torch.ops.aten.max_pool3d_with_indices.default(buf1768, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1770 = buf1769[0]
del buf1769
buf1773 = torch.ops.aten.max_pool3d_with_indices.default(buf1772, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1774 = buf1773[0]
del buf1773
buf1777 = torch.ops.aten.max_pool3d_with_indices.default(buf1776, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1778 = buf1777[0]
del buf1777
buf1780 = buf1776
del buf1776
buf1784 = buf1772
del buf1772
buf1788 = buf1768
del buf1768
buf1796 = buf1764
del buf1764
buf1800 = buf1762
del buf1762
buf1804 = buf1758
del buf1758
triton_poi_fused_minimum_neg_2[grid(256)](buf1770, buf1778, buf1774,
buf1780, buf1784, buf1788, buf1796, buf1800, buf1804, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1781 = torch.ops.aten.max_pool3d_with_indices.default(buf1780, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1780
buf1782 = buf1781[0]
del buf1781
buf1785 = torch.ops.aten.max_pool3d_with_indices.default(buf1784, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1784
buf1786 = buf1785[0]
del buf1785
buf1789 = torch.ops.aten.max_pool3d_with_indices.default(buf1788, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1788
buf1790 = buf1789[0]
del buf1789
buf1792 = buf1782
del buf1782
triton_poi_fused_minimum_neg_3[grid(256)](buf1792, buf1790, buf1786,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1793 = torch.ops.aten.max_pool3d_with_indices.default(buf1792, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1794 = buf1793[0]
del buf1793
buf1797 = torch.ops.aten.max_pool3d_with_indices.default(buf1796, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1798 = buf1797[0]
del buf1797
buf1801 = torch.ops.aten.max_pool3d_with_indices.default(buf1800, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1802 = buf1801[0]
del buf1801
buf1805 = torch.ops.aten.max_pool3d_with_indices.default(buf1804, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1806 = buf1805[0]
del buf1805
buf1808 = buf1804
del buf1804
buf1812 = buf1800
del buf1800
buf1816 = buf1796
del buf1796
buf1824 = buf1792
del buf1792
buf1828 = buf1790
del buf1790
buf1832 = buf1786
del buf1786
triton_poi_fused_minimum_neg_2[grid(256)](buf1798, buf1806, buf1802,
buf1808, buf1812, buf1816, buf1824, buf1828, buf1832, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1809 = torch.ops.aten.max_pool3d_with_indices.default(buf1808, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1808
buf1810 = buf1809[0]
del buf1809
buf1813 = torch.ops.aten.max_pool3d_with_indices.default(buf1812, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1812
buf1814 = buf1813[0]
del buf1813
buf1817 = torch.ops.aten.max_pool3d_with_indices.default(buf1816, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1816
buf1818 = buf1817[0]
del buf1817
buf1820 = buf1810
del buf1810
triton_poi_fused_minimum_neg_3[grid(256)](buf1820, buf1818, buf1814,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1821 = torch.ops.aten.max_pool3d_with_indices.default(buf1820, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1822 = buf1821[0]
del buf1821
buf1825 = torch.ops.aten.max_pool3d_with_indices.default(buf1824, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1826 = buf1825[0]
del buf1825
buf1829 = torch.ops.aten.max_pool3d_with_indices.default(buf1828, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1830 = buf1829[0]
del buf1829
buf1833 = torch.ops.aten.max_pool3d_with_indices.default(buf1832, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1834 = buf1833[0]
del buf1833
buf1836 = buf1832
del buf1832
buf1840 = buf1828
del buf1828
buf1844 = buf1824
del buf1824
buf1852 = buf1820
del buf1820
buf1856 = buf1818
del buf1818
buf1860 = buf1814
del buf1814
triton_poi_fused_minimum_neg_2[grid(256)](buf1826, buf1834, buf1830,
buf1836, buf1840, buf1844, buf1852, buf1856, buf1860, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1837 = torch.ops.aten.max_pool3d_with_indices.default(buf1836, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1836
buf1838 = buf1837[0]
del buf1837
buf1841 = torch.ops.aten.max_pool3d_with_indices.default(buf1840, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1840
buf1842 = buf1841[0]
del buf1841
buf1845 = torch.ops.aten.max_pool3d_with_indices.default(buf1844, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1844
buf1846 = buf1845[0]
del buf1845
buf1848 = buf1838
del buf1838
triton_poi_fused_minimum_neg_3[grid(256)](buf1848, buf1846, buf1842,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1849 = torch.ops.aten.max_pool3d_with_indices.default(buf1848, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1850 = buf1849[0]
del buf1849
buf1853 = torch.ops.aten.max_pool3d_with_indices.default(buf1852, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1854 = buf1853[0]
del buf1853
buf1857 = torch.ops.aten.max_pool3d_with_indices.default(buf1856, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1858 = buf1857[0]
del buf1857
buf1861 = torch.ops.aten.max_pool3d_with_indices.default(buf1860, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1862 = buf1861[0]
del buf1861
buf1864 = buf1860
del buf1860
buf1868 = buf1856
del buf1856
buf1872 = buf1852
del buf1852
buf1880 = buf1848
del buf1848
buf1884 = buf1846
del buf1846
buf1888 = buf1842
del buf1842
triton_poi_fused_minimum_neg_2[grid(256)](buf1854, buf1862, buf1858,
buf1864, buf1868, buf1872, buf1880, buf1884, buf1888, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1865 = torch.ops.aten.max_pool3d_with_indices.default(buf1864, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1864
buf1866 = buf1865[0]
del buf1865
buf1869 = torch.ops.aten.max_pool3d_with_indices.default(buf1868, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1868
buf1870 = buf1869[0]
del buf1869
buf1873 = torch.ops.aten.max_pool3d_with_indices.default(buf1872, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1872
buf1874 = buf1873[0]
del buf1873
buf1876 = buf1866
del buf1866
triton_poi_fused_minimum_neg_3[grid(256)](buf1876, buf1874, buf1870,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1877 = torch.ops.aten.max_pool3d_with_indices.default(buf1876, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1878 = buf1877[0]
del buf1877
buf1881 = torch.ops.aten.max_pool3d_with_indices.default(buf1880, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1882 = buf1881[0]
del buf1881
buf1885 = torch.ops.aten.max_pool3d_with_indices.default(buf1884, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1886 = buf1885[0]
del buf1885
buf1889 = torch.ops.aten.max_pool3d_with_indices.default(buf1888, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1890 = buf1889[0]
del buf1889
buf1892 = buf1888
del buf1888
buf1896 = buf1884
del buf1884
buf1900 = buf1880
del buf1880
buf1908 = buf1876
del buf1876
buf1912 = buf1874
del buf1874
buf1916 = buf1870
del buf1870
triton_poi_fused_minimum_neg_2[grid(256)](buf1882, buf1890, buf1886,
buf1892, buf1896, buf1900, buf1908, buf1912, buf1916, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1893 = torch.ops.aten.max_pool3d_with_indices.default(buf1892, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1892
buf1894 = buf1893[0]
del buf1893
buf1897 = torch.ops.aten.max_pool3d_with_indices.default(buf1896, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1896
buf1898 = buf1897[0]
del buf1897
buf1901 = torch.ops.aten.max_pool3d_with_indices.default(buf1900, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1900
buf1902 = buf1901[0]
del buf1901
buf1904 = buf1894
del buf1894
triton_poi_fused_minimum_neg_3[grid(256)](buf1904, buf1902, buf1898,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1905 = torch.ops.aten.max_pool3d_with_indices.default(buf1904, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1906 = buf1905[0]
del buf1905
buf1909 = torch.ops.aten.max_pool3d_with_indices.default(buf1908, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1910 = buf1909[0]
del buf1909
buf1913 = torch.ops.aten.max_pool3d_with_indices.default(buf1912, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1914 = buf1913[0]
del buf1913
buf1917 = torch.ops.aten.max_pool3d_with_indices.default(buf1916, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1918 = buf1917[0]
del buf1917
buf1920 = buf1916
del buf1916
buf1924 = buf1912
del buf1912
buf1928 = buf1908
del buf1908
buf1936 = buf1904
del buf1904
buf1940 = buf1902
del buf1902
buf1944 = buf1898
del buf1898
triton_poi_fused_minimum_neg_2[grid(256)](buf1910, buf1918, buf1914,
buf1920, buf1924, buf1928, buf1936, buf1940, buf1944, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1921 = torch.ops.aten.max_pool3d_with_indices.default(buf1920, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1920
buf1922 = buf1921[0]
del buf1921
buf1925 = torch.ops.aten.max_pool3d_with_indices.default(buf1924, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1924
buf1926 = buf1925[0]
del buf1925
buf1929 = torch.ops.aten.max_pool3d_with_indices.default(buf1928, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1928
buf1930 = buf1929[0]
del buf1929
buf1932 = buf1922
del buf1922
triton_poi_fused_minimum_neg_3[grid(256)](buf1932, buf1930, buf1926,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1933 = torch.ops.aten.max_pool3d_with_indices.default(buf1932, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1934 = buf1933[0]
del buf1933
buf1937 = torch.ops.aten.max_pool3d_with_indices.default(buf1936, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1938 = buf1937[0]
del buf1937
buf1941 = torch.ops.aten.max_pool3d_with_indices.default(buf1940, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1942 = buf1941[0]
del buf1941
buf1945 = torch.ops.aten.max_pool3d_with_indices.default(buf1944, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1946 = buf1945[0]
del buf1945
buf1948 = buf1944
del buf1944
buf1952 = buf1940
del buf1940
buf1956 = buf1936
del buf1936
buf1964 = buf1932
del buf1932
buf1968 = buf1930
del buf1930
buf1972 = buf1926
del buf1926
triton_poi_fused_minimum_neg_2[grid(256)](buf1938, buf1946, buf1942,
buf1948, buf1952, buf1956, buf1964, buf1968, buf1972, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1949 = torch.ops.aten.max_pool3d_with_indices.default(buf1948, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1948
buf1950 = buf1949[0]
del buf1949
buf1953 = torch.ops.aten.max_pool3d_with_indices.default(buf1952, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1952
buf1954 = buf1953[0]
del buf1953
buf1957 = torch.ops.aten.max_pool3d_with_indices.default(buf1956, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1956
buf1958 = buf1957[0]
del buf1957
buf1960 = buf1950
del buf1950
triton_poi_fused_minimum_neg_3[grid(256)](buf1960, buf1958, buf1954,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1961 = torch.ops.aten.max_pool3d_with_indices.default(buf1960, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1962 = buf1961[0]
del buf1961
buf1965 = torch.ops.aten.max_pool3d_with_indices.default(buf1964, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1966 = buf1965[0]
del buf1965
buf1969 = torch.ops.aten.max_pool3d_with_indices.default(buf1968, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1970 = buf1969[0]
del buf1969
buf1973 = torch.ops.aten.max_pool3d_with_indices.default(buf1972, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf1974 = buf1973[0]
del buf1973
buf1976 = buf1972
del buf1972
buf1980 = buf1968
del buf1968
buf1984 = buf1964
del buf1964
buf1992 = buf1960
del buf1960
buf1996 = buf1958
del buf1958
buf2000 = buf1954
del buf1954
triton_poi_fused_minimum_neg_2[grid(256)](buf1966, buf1974, buf1970,
buf1976, buf1980, buf1984, buf1992, buf1996, buf2000, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1977 = torch.ops.aten.max_pool3d_with_indices.default(buf1976, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf1976
buf1978 = buf1977[0]
del buf1977
buf1981 = torch.ops.aten.max_pool3d_with_indices.default(buf1980, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf1980
buf1982 = buf1981[0]
del buf1981
buf1985 = torch.ops.aten.max_pool3d_with_indices.default(buf1984, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf1984
buf1986 = buf1985[0]
del buf1985
buf1988 = buf1978
del buf1978
triton_poi_fused_minimum_neg_3[grid(256)](buf1988, buf1986, buf1982,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1989 = torch.ops.aten.max_pool3d_with_indices.default(buf1988, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf1990 = buf1989[0]
del buf1989
buf1993 = torch.ops.aten.max_pool3d_with_indices.default(buf1992, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf1994 = buf1993[0]
del buf1993
buf1997 = torch.ops.aten.max_pool3d_with_indices.default(buf1996, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf1998 = buf1997[0]
del buf1997
buf2001 = torch.ops.aten.max_pool3d_with_indices.default(buf2000, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2002 = buf2001[0]
del buf2001
buf2004 = buf2000
del buf2000
buf2008 = buf1996
del buf1996
buf2012 = buf1992
del buf1992
buf2020 = buf1988
del buf1988
buf2024 = buf1986
del buf1986
buf2028 = buf1982
del buf1982
triton_poi_fused_minimum_neg_2[grid(256)](buf1994, buf2002, buf1998,
buf2004, buf2008, buf2012, buf2020, buf2024, buf2028, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2005 = torch.ops.aten.max_pool3d_with_indices.default(buf2004, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2004
buf2006 = buf2005[0]
del buf2005
buf2009 = torch.ops.aten.max_pool3d_with_indices.default(buf2008, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2008
buf2010 = buf2009[0]
del buf2009
buf2013 = torch.ops.aten.max_pool3d_with_indices.default(buf2012, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2012
buf2014 = buf2013[0]
del buf2013
buf2016 = buf2006
del buf2006
triton_poi_fused_minimum_neg_3[grid(256)](buf2016, buf2014, buf2010,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2017 = torch.ops.aten.max_pool3d_with_indices.default(buf2016, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2018 = buf2017[0]
del buf2017
buf2021 = torch.ops.aten.max_pool3d_with_indices.default(buf2020, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2022 = buf2021[0]
del buf2021
buf2025 = torch.ops.aten.max_pool3d_with_indices.default(buf2024, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2026 = buf2025[0]
del buf2025
buf2029 = torch.ops.aten.max_pool3d_with_indices.default(buf2028, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2030 = buf2029[0]
del buf2029
buf2032 = buf2028
del buf2028
buf2036 = buf2024
del buf2024
buf2040 = buf2020
del buf2020
buf2048 = buf2016
del buf2016
buf2052 = buf2014
del buf2014
buf2056 = buf2010
del buf2010
triton_poi_fused_minimum_neg_2[grid(256)](buf2022, buf2030, buf2026,
buf2032, buf2036, buf2040, buf2048, buf2052, buf2056, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2033 = torch.ops.aten.max_pool3d_with_indices.default(buf2032, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2032
buf2034 = buf2033[0]
del buf2033
buf2037 = torch.ops.aten.max_pool3d_with_indices.default(buf2036, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2036
buf2038 = buf2037[0]
del buf2037
buf2041 = torch.ops.aten.max_pool3d_with_indices.default(buf2040, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2040
buf2042 = buf2041[0]
del buf2041
buf2044 = buf2034
del buf2034
triton_poi_fused_minimum_neg_3[grid(256)](buf2044, buf2042, buf2038,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2045 = torch.ops.aten.max_pool3d_with_indices.default(buf2044, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2046 = buf2045[0]
del buf2045
buf2049 = torch.ops.aten.max_pool3d_with_indices.default(buf2048, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2050 = buf2049[0]
del buf2049
buf2053 = torch.ops.aten.max_pool3d_with_indices.default(buf2052, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2054 = buf2053[0]
del buf2053
buf2057 = torch.ops.aten.max_pool3d_with_indices.default(buf2056, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2058 = buf2057[0]
del buf2057
buf2060 = buf2056
del buf2056
buf2064 = buf2052
del buf2052
buf2068 = buf2048
del buf2048
buf2076 = buf2044
del buf2044
buf2080 = buf2042
del buf2042
buf2084 = buf2038
del buf2038
triton_poi_fused_minimum_neg_2[grid(256)](buf2050, buf2058, buf2054,
buf2060, buf2064, buf2068, buf2076, buf2080, buf2084, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2061 = torch.ops.aten.max_pool3d_with_indices.default(buf2060, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2060
buf2062 = buf2061[0]
del buf2061
buf2065 = torch.ops.aten.max_pool3d_with_indices.default(buf2064, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2064
buf2066 = buf2065[0]
del buf2065
buf2069 = torch.ops.aten.max_pool3d_with_indices.default(buf2068, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2068
buf2070 = buf2069[0]
del buf2069
buf2072 = buf2062
del buf2062
triton_poi_fused_minimum_neg_3[grid(256)](buf2072, buf2070, buf2066,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2073 = torch.ops.aten.max_pool3d_with_indices.default(buf2072, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2074 = buf2073[0]
del buf2073
buf2077 = torch.ops.aten.max_pool3d_with_indices.default(buf2076, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2078 = buf2077[0]
del buf2077
buf2081 = torch.ops.aten.max_pool3d_with_indices.default(buf2080, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2082 = buf2081[0]
del buf2081
buf2085 = torch.ops.aten.max_pool3d_with_indices.default(buf2084, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2086 = buf2085[0]
del buf2085
buf2088 = buf2084
del buf2084
buf2092 = buf2080
del buf2080
buf2096 = buf2076
del buf2076
buf2104 = buf2072
del buf2072
buf2108 = buf2070
del buf2070
buf2112 = buf2066
del buf2066
triton_poi_fused_minimum_neg_2[grid(256)](buf2078, buf2086, buf2082,
buf2088, buf2092, buf2096, buf2104, buf2108, buf2112, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2089 = torch.ops.aten.max_pool3d_with_indices.default(buf2088, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2088
buf2090 = buf2089[0]
del buf2089
buf2093 = torch.ops.aten.max_pool3d_with_indices.default(buf2092, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2092
buf2094 = buf2093[0]
del buf2093
buf2097 = torch.ops.aten.max_pool3d_with_indices.default(buf2096, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2096
buf2098 = buf2097[0]
del buf2097
buf2100 = buf2090
del buf2090
triton_poi_fused_minimum_neg_3[grid(256)](buf2100, buf2098, buf2094,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2101 = torch.ops.aten.max_pool3d_with_indices.default(buf2100, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2102 = buf2101[0]
del buf2101
buf2105 = torch.ops.aten.max_pool3d_with_indices.default(buf2104, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2106 = buf2105[0]
del buf2105
buf2109 = torch.ops.aten.max_pool3d_with_indices.default(buf2108, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2110 = buf2109[0]
del buf2109
buf2113 = torch.ops.aten.max_pool3d_with_indices.default(buf2112, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2114 = buf2113[0]
del buf2113
buf2116 = buf2112
del buf2112
buf2120 = buf2108
del buf2108
buf2124 = buf2104
del buf2104
buf2132 = buf2100
del buf2100
buf2136 = buf2098
del buf2098
buf2140 = buf2094
del buf2094
triton_poi_fused_minimum_neg_2[grid(256)](buf2106, buf2114, buf2110,
buf2116, buf2120, buf2124, buf2132, buf2136, buf2140, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2117 = torch.ops.aten.max_pool3d_with_indices.default(buf2116, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2116
buf2118 = buf2117[0]
del buf2117
buf2121 = torch.ops.aten.max_pool3d_with_indices.default(buf2120, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2120
buf2122 = buf2121[0]
del buf2121
buf2125 = torch.ops.aten.max_pool3d_with_indices.default(buf2124, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2124
buf2126 = buf2125[0]
del buf2125
buf2128 = buf2118
del buf2118
triton_poi_fused_minimum_neg_3[grid(256)](buf2128, buf2126, buf2122,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2129 = torch.ops.aten.max_pool3d_with_indices.default(buf2128, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2130 = buf2129[0]
del buf2129
buf2133 = torch.ops.aten.max_pool3d_with_indices.default(buf2132, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2134 = buf2133[0]
del buf2133
buf2137 = torch.ops.aten.max_pool3d_with_indices.default(buf2136, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2138 = buf2137[0]
del buf2137
buf2141 = torch.ops.aten.max_pool3d_with_indices.default(buf2140, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2142 = buf2141[0]
del buf2141
buf2144 = buf2140
del buf2140
buf2148 = buf2136
del buf2136
buf2152 = buf2132
del buf2132
buf2160 = buf2128
del buf2128
buf2164 = buf2126
del buf2126
buf2168 = buf2122
del buf2122
triton_poi_fused_minimum_neg_2[grid(256)](buf2134, buf2142, buf2138,
buf2144, buf2148, buf2152, buf2160, buf2164, buf2168, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2145 = torch.ops.aten.max_pool3d_with_indices.default(buf2144, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2144
buf2146 = buf2145[0]
del buf2145
buf2149 = torch.ops.aten.max_pool3d_with_indices.default(buf2148, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2148
buf2150 = buf2149[0]
del buf2149
buf2153 = torch.ops.aten.max_pool3d_with_indices.default(buf2152, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2152
buf2154 = buf2153[0]
del buf2153
buf2156 = buf2146
del buf2146
triton_poi_fused_minimum_neg_3[grid(256)](buf2156, buf2154, buf2150,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2157 = torch.ops.aten.max_pool3d_with_indices.default(buf2156, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2158 = buf2157[0]
del buf2157
buf2161 = torch.ops.aten.max_pool3d_with_indices.default(buf2160, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2162 = buf2161[0]
del buf2161
buf2165 = torch.ops.aten.max_pool3d_with_indices.default(buf2164, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2166 = buf2165[0]
del buf2165
buf2169 = torch.ops.aten.max_pool3d_with_indices.default(buf2168, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2170 = buf2169[0]
del buf2169
buf2172 = buf2168
del buf2168
buf2176 = buf2164
del buf2164
buf2180 = buf2160
del buf2160
buf2188 = buf2156
del buf2156
buf2192 = buf2154
del buf2154
buf2196 = buf2150
del buf2150
triton_poi_fused_minimum_neg_2[grid(256)](buf2162, buf2170, buf2166,
buf2172, buf2176, buf2180, buf2188, buf2192, buf2196, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2173 = torch.ops.aten.max_pool3d_with_indices.default(buf2172, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2172
buf2174 = buf2173[0]
del buf2173
buf2177 = torch.ops.aten.max_pool3d_with_indices.default(buf2176, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2176
buf2178 = buf2177[0]
del buf2177
buf2181 = torch.ops.aten.max_pool3d_with_indices.default(buf2180, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2180
buf2182 = buf2181[0]
del buf2181
buf2184 = buf2174
del buf2174
triton_poi_fused_minimum_neg_3[grid(256)](buf2184, buf2182, buf2178,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2185 = torch.ops.aten.max_pool3d_with_indices.default(buf2184, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2186 = buf2185[0]
del buf2185
buf2189 = torch.ops.aten.max_pool3d_with_indices.default(buf2188, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2190 = buf2189[0]
del buf2189
buf2193 = torch.ops.aten.max_pool3d_with_indices.default(buf2192, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2194 = buf2193[0]
del buf2193
buf2197 = torch.ops.aten.max_pool3d_with_indices.default(buf2196, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2198 = buf2197[0]
del buf2197
buf2200 = buf2196
del buf2196
buf2204 = buf2192
del buf2192
buf2208 = buf2188
del buf2188
buf2216 = buf2184
del buf2184
buf2220 = buf2182
del buf2182
buf2224 = buf2178
del buf2178
triton_poi_fused_minimum_neg_2[grid(256)](buf2190, buf2198, buf2194,
buf2200, buf2204, buf2208, buf2216, buf2220, buf2224, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2201 = torch.ops.aten.max_pool3d_with_indices.default(buf2200, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2200
buf2202 = buf2201[0]
del buf2201
buf2205 = torch.ops.aten.max_pool3d_with_indices.default(buf2204, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2204
buf2206 = buf2205[0]
del buf2205
buf2209 = torch.ops.aten.max_pool3d_with_indices.default(buf2208, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2208
buf2210 = buf2209[0]
del buf2209
buf2212 = buf2202
del buf2202
triton_poi_fused_minimum_neg_3[grid(256)](buf2212, buf2210, buf2206,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2213 = torch.ops.aten.max_pool3d_with_indices.default(buf2212, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2214 = buf2213[0]
del buf2213
buf2217 = torch.ops.aten.max_pool3d_with_indices.default(buf2216, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2218 = buf2217[0]
del buf2217
buf2221 = torch.ops.aten.max_pool3d_with_indices.default(buf2220, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2222 = buf2221[0]
del buf2221
buf2225 = torch.ops.aten.max_pool3d_with_indices.default(buf2224, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2226 = buf2225[0]
del buf2225
buf2228 = buf2224
del buf2224
buf2232 = buf2220
del buf2220
buf2236 = buf2216
del buf2216
buf2244 = buf2212
del buf2212
buf2248 = buf2210
del buf2210
buf2252 = buf2206
del buf2206
triton_poi_fused_minimum_neg_2[grid(256)](buf2218, buf2226, buf2222,
buf2228, buf2232, buf2236, buf2244, buf2248, buf2252, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2229 = torch.ops.aten.max_pool3d_with_indices.default(buf2228, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2228
buf2230 = buf2229[0]
del buf2229
buf2233 = torch.ops.aten.max_pool3d_with_indices.default(buf2232, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2232
buf2234 = buf2233[0]
del buf2233
buf2237 = torch.ops.aten.max_pool3d_with_indices.default(buf2236, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2236
buf2238 = buf2237[0]
del buf2237
buf2240 = buf2230
del buf2230
triton_poi_fused_minimum_neg_3[grid(256)](buf2240, buf2238, buf2234,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2241 = torch.ops.aten.max_pool3d_with_indices.default(buf2240, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2242 = buf2241[0]
del buf2241
buf2245 = torch.ops.aten.max_pool3d_with_indices.default(buf2244, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2246 = buf2245[0]
del buf2245
buf2249 = torch.ops.aten.max_pool3d_with_indices.default(buf2248, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2250 = buf2249[0]
del buf2249
buf2253 = torch.ops.aten.max_pool3d_with_indices.default(buf2252, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2254 = buf2253[0]
del buf2253
buf2256 = buf2252
del buf2252
buf2260 = buf2248
del buf2248
buf2264 = buf2244
del buf2244
buf2272 = buf2240
del buf2240
buf2276 = buf2238
del buf2238
buf2280 = buf2234
del buf2234
triton_poi_fused_minimum_neg_2[grid(256)](buf2246, buf2254, buf2250,
buf2256, buf2260, buf2264, buf2272, buf2276, buf2280, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2257 = torch.ops.aten.max_pool3d_with_indices.default(buf2256, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2256
buf2258 = buf2257[0]
del buf2257
buf2261 = torch.ops.aten.max_pool3d_with_indices.default(buf2260, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2260
buf2262 = buf2261[0]
del buf2261
buf2265 = torch.ops.aten.max_pool3d_with_indices.default(buf2264, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2264
buf2266 = buf2265[0]
del buf2265
buf2268 = buf2258
del buf2258
triton_poi_fused_minimum_neg_3[grid(256)](buf2268, buf2266, buf2262,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2269 = torch.ops.aten.max_pool3d_with_indices.default(buf2268, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2270 = buf2269[0]
del buf2269
buf2273 = torch.ops.aten.max_pool3d_with_indices.default(buf2272, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2274 = buf2273[0]
del buf2273
buf2277 = torch.ops.aten.max_pool3d_with_indices.default(buf2276, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2278 = buf2277[0]
del buf2277
buf2281 = torch.ops.aten.max_pool3d_with_indices.default(buf2280, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2282 = buf2281[0]
del buf2281
buf2284 = buf2280
del buf2280
buf2288 = buf2276
del buf2276
buf2292 = buf2272
del buf2272
buf2300 = buf2268
del buf2268
buf2304 = buf2266
del buf2266
buf2308 = buf2262
del buf2262
triton_poi_fused_minimum_neg_2[grid(256)](buf2274, buf2282, buf2278,
buf2284, buf2288, buf2292, buf2300, buf2304, buf2308, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2285 = torch.ops.aten.max_pool3d_with_indices.default(buf2284, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2284
buf2286 = buf2285[0]
del buf2285
buf2289 = torch.ops.aten.max_pool3d_with_indices.default(buf2288, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2288
buf2290 = buf2289[0]
del buf2289
buf2293 = torch.ops.aten.max_pool3d_with_indices.default(buf2292, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2292
buf2294 = buf2293[0]
del buf2293
buf2296 = buf2286
del buf2286
triton_poi_fused_minimum_neg_3[grid(256)](buf2296, buf2294, buf2290,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2297 = torch.ops.aten.max_pool3d_with_indices.default(buf2296, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2298 = buf2297[0]
del buf2297
buf2301 = torch.ops.aten.max_pool3d_with_indices.default(buf2300, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2302 = buf2301[0]
del buf2301
buf2305 = torch.ops.aten.max_pool3d_with_indices.default(buf2304, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2306 = buf2305[0]
del buf2305
buf2309 = torch.ops.aten.max_pool3d_with_indices.default(buf2308, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2310 = buf2309[0]
del buf2309
buf2312 = buf2308
del buf2308
buf2316 = buf2304
del buf2304
buf2320 = buf2300
del buf2300
buf2328 = buf2296
del buf2296
buf2332 = buf2294
del buf2294
buf2336 = buf2290
del buf2290
triton_poi_fused_minimum_neg_2[grid(256)](buf2302, buf2310, buf2306,
buf2312, buf2316, buf2320, buf2328, buf2332, buf2336, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2313 = torch.ops.aten.max_pool3d_with_indices.default(buf2312, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2312
buf2314 = buf2313[0]
del buf2313
buf2317 = torch.ops.aten.max_pool3d_with_indices.default(buf2316, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2316
buf2318 = buf2317[0]
del buf2317
buf2321 = torch.ops.aten.max_pool3d_with_indices.default(buf2320, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2320
buf2322 = buf2321[0]
del buf2321
buf2324 = buf2314
del buf2314
triton_poi_fused_minimum_neg_3[grid(256)](buf2324, buf2322, buf2318,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2325 = torch.ops.aten.max_pool3d_with_indices.default(buf2324, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2326 = buf2325[0]
del buf2325
buf2329 = torch.ops.aten.max_pool3d_with_indices.default(buf2328, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2330 = buf2329[0]
del buf2329
buf2333 = torch.ops.aten.max_pool3d_with_indices.default(buf2332, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2334 = buf2333[0]
del buf2333
buf2337 = torch.ops.aten.max_pool3d_with_indices.default(buf2336, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2338 = buf2337[0]
del buf2337
buf2340 = buf2336
del buf2336
buf2344 = buf2332
del buf2332
buf2348 = buf2328
del buf2328
buf2356 = buf2324
del buf2324
buf2360 = buf2322
del buf2322
buf2364 = buf2318
del buf2318
triton_poi_fused_minimum_neg_2[grid(256)](buf2330, buf2338, buf2334,
buf2340, buf2344, buf2348, buf2356, buf2360, buf2364, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2341 = torch.ops.aten.max_pool3d_with_indices.default(buf2340, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2340
buf2342 = buf2341[0]
del buf2341
buf2345 = torch.ops.aten.max_pool3d_with_indices.default(buf2344, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2344
buf2346 = buf2345[0]
del buf2345
buf2349 = torch.ops.aten.max_pool3d_with_indices.default(buf2348, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2348
buf2350 = buf2349[0]
del buf2349
buf2352 = buf2342
del buf2342
triton_poi_fused_minimum_neg_3[grid(256)](buf2352, buf2350, buf2346,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2353 = torch.ops.aten.max_pool3d_with_indices.default(buf2352, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2354 = buf2353[0]
del buf2353
buf2357 = torch.ops.aten.max_pool3d_with_indices.default(buf2356, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2358 = buf2357[0]
del buf2357
buf2361 = torch.ops.aten.max_pool3d_with_indices.default(buf2360, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2362 = buf2361[0]
del buf2361
buf2365 = torch.ops.aten.max_pool3d_with_indices.default(buf2364, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2366 = buf2365[0]
del buf2365
buf2368 = buf2364
del buf2364
buf2372 = buf2360
del buf2360
buf2376 = buf2356
del buf2356
buf2384 = buf2352
del buf2352
buf2388 = buf2350
del buf2350
buf2392 = buf2346
del buf2346
triton_poi_fused_minimum_neg_2[grid(256)](buf2358, buf2366, buf2362,
buf2368, buf2372, buf2376, buf2384, buf2388, buf2392, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2369 = torch.ops.aten.max_pool3d_with_indices.default(buf2368, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2368
buf2370 = buf2369[0]
del buf2369
buf2373 = torch.ops.aten.max_pool3d_with_indices.default(buf2372, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2372
buf2374 = buf2373[0]
del buf2373
buf2377 = torch.ops.aten.max_pool3d_with_indices.default(buf2376, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2376
buf2378 = buf2377[0]
del buf2377
buf2380 = buf2370
del buf2370
triton_poi_fused_minimum_neg_3[grid(256)](buf2380, buf2378, buf2374,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2381 = torch.ops.aten.max_pool3d_with_indices.default(buf2380, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2382 = buf2381[0]
del buf2381
buf2385 = torch.ops.aten.max_pool3d_with_indices.default(buf2384, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2386 = buf2385[0]
del buf2385
buf2389 = torch.ops.aten.max_pool3d_with_indices.default(buf2388, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2390 = buf2389[0]
del buf2389
buf2393 = torch.ops.aten.max_pool3d_with_indices.default(buf2392, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2394 = buf2393[0]
del buf2393
buf2396 = buf2392
del buf2392
buf2400 = buf2388
del buf2388
buf2404 = buf2384
del buf2384
buf2412 = buf2380
del buf2380
buf2416 = buf2378
del buf2378
buf2420 = buf2374
del buf2374
triton_poi_fused_minimum_neg_2[grid(256)](buf2386, buf2394, buf2390,
buf2396, buf2400, buf2404, buf2412, buf2416, buf2420, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2397 = torch.ops.aten.max_pool3d_with_indices.default(buf2396, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2396
buf2398 = buf2397[0]
del buf2397
buf2401 = torch.ops.aten.max_pool3d_with_indices.default(buf2400, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2400
buf2402 = buf2401[0]
del buf2401
buf2405 = torch.ops.aten.max_pool3d_with_indices.default(buf2404, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2404
buf2406 = buf2405[0]
del buf2405
buf2408 = buf2398
del buf2398
triton_poi_fused_minimum_neg_3[grid(256)](buf2408, buf2406, buf2402,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2409 = torch.ops.aten.max_pool3d_with_indices.default(buf2408, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2410 = buf2409[0]
del buf2409
buf2413 = torch.ops.aten.max_pool3d_with_indices.default(buf2412, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2414 = buf2413[0]
del buf2413
buf2417 = torch.ops.aten.max_pool3d_with_indices.default(buf2416, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2418 = buf2417[0]
del buf2417
buf2421 = torch.ops.aten.max_pool3d_with_indices.default(buf2420, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2422 = buf2421[0]
del buf2421
buf2424 = buf2420
del buf2420
buf2428 = buf2416
del buf2416
buf2432 = buf2412
del buf2412
buf2440 = buf2408
del buf2408
buf2444 = buf2406
del buf2406
buf2448 = buf2402
del buf2402
triton_poi_fused_minimum_neg_2[grid(256)](buf2414, buf2422, buf2418,
buf2424, buf2428, buf2432, buf2440, buf2444, buf2448, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2425 = torch.ops.aten.max_pool3d_with_indices.default(buf2424, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2424
buf2426 = buf2425[0]
del buf2425
buf2429 = torch.ops.aten.max_pool3d_with_indices.default(buf2428, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2428
buf2430 = buf2429[0]
del buf2429
buf2433 = torch.ops.aten.max_pool3d_with_indices.default(buf2432, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2432
buf2434 = buf2433[0]
del buf2433
buf2436 = buf2426
del buf2426
triton_poi_fused_minimum_neg_3[grid(256)](buf2436, buf2434, buf2430,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2437 = torch.ops.aten.max_pool3d_with_indices.default(buf2436, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2438 = buf2437[0]
del buf2437
buf2441 = torch.ops.aten.max_pool3d_with_indices.default(buf2440, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2442 = buf2441[0]
del buf2441
buf2445 = torch.ops.aten.max_pool3d_with_indices.default(buf2444, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2446 = buf2445[0]
del buf2445
buf2449 = torch.ops.aten.max_pool3d_with_indices.default(buf2448, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2450 = buf2449[0]
del buf2449
buf2452 = buf2448
del buf2448
buf2456 = buf2444
del buf2444
buf2460 = buf2440
del buf2440
buf2468 = buf2436
del buf2436
buf2472 = buf2434
del buf2434
buf2476 = buf2430
del buf2430
triton_poi_fused_minimum_neg_2[grid(256)](buf2442, buf2450, buf2446,
buf2452, buf2456, buf2460, buf2468, buf2472, buf2476, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2453 = torch.ops.aten.max_pool3d_with_indices.default(buf2452, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2452
buf2454 = buf2453[0]
del buf2453
buf2457 = torch.ops.aten.max_pool3d_with_indices.default(buf2456, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2456
buf2458 = buf2457[0]
del buf2457
buf2461 = torch.ops.aten.max_pool3d_with_indices.default(buf2460, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2460
buf2462 = buf2461[0]
del buf2461
buf2464 = buf2454
del buf2454
triton_poi_fused_minimum_neg_3[grid(256)](buf2464, buf2462, buf2458,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2465 = torch.ops.aten.max_pool3d_with_indices.default(buf2464, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2466 = buf2465[0]
del buf2465
buf2469 = torch.ops.aten.max_pool3d_with_indices.default(buf2468, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2470 = buf2469[0]
del buf2469
buf2473 = torch.ops.aten.max_pool3d_with_indices.default(buf2472, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2474 = buf2473[0]
del buf2473
buf2477 = torch.ops.aten.max_pool3d_with_indices.default(buf2476, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2478 = buf2477[0]
del buf2477
buf2480 = buf2476
del buf2476
buf2484 = buf2472
del buf2472
buf2488 = buf2468
del buf2468
buf2496 = buf2464
del buf2464
buf2500 = buf2462
del buf2462
buf2504 = buf2458
del buf2458
triton_poi_fused_minimum_neg_2[grid(256)](buf2470, buf2478, buf2474,
buf2480, buf2484, buf2488, buf2496, buf2500, buf2504, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2481 = torch.ops.aten.max_pool3d_with_indices.default(buf2480, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2480
buf2482 = buf2481[0]
del buf2481
buf2485 = torch.ops.aten.max_pool3d_with_indices.default(buf2484, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2484
buf2486 = buf2485[0]
del buf2485
buf2489 = torch.ops.aten.max_pool3d_with_indices.default(buf2488, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2488
buf2490 = buf2489[0]
del buf2489
buf2492 = buf2482
del buf2482
triton_poi_fused_minimum_neg_3[grid(256)](buf2492, buf2490, buf2486,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2493 = torch.ops.aten.max_pool3d_with_indices.default(buf2492, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2494 = buf2493[0]
del buf2493
buf2497 = torch.ops.aten.max_pool3d_with_indices.default(buf2496, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2498 = buf2497[0]
del buf2497
buf2501 = torch.ops.aten.max_pool3d_with_indices.default(buf2500, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2502 = buf2501[0]
del buf2501
buf2505 = torch.ops.aten.max_pool3d_with_indices.default(buf2504, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2506 = buf2505[0]
del buf2505
buf2508 = buf2504
del buf2504
buf2512 = buf2500
del buf2500
buf2516 = buf2496
del buf2496
buf2524 = buf2492
del buf2492
buf2528 = buf2490
del buf2490
buf2532 = buf2486
del buf2486
triton_poi_fused_minimum_neg_2[grid(256)](buf2498, buf2506, buf2502,
buf2508, buf2512, buf2516, buf2524, buf2528, buf2532, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2509 = torch.ops.aten.max_pool3d_with_indices.default(buf2508, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2508
buf2510 = buf2509[0]
del buf2509
buf2513 = torch.ops.aten.max_pool3d_with_indices.default(buf2512, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2512
buf2514 = buf2513[0]
del buf2513
buf2517 = torch.ops.aten.max_pool3d_with_indices.default(buf2516, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2516
buf2518 = buf2517[0]
del buf2517
buf2520 = buf2510
del buf2510
triton_poi_fused_minimum_neg_3[grid(256)](buf2520, buf2518, buf2514,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2521 = torch.ops.aten.max_pool3d_with_indices.default(buf2520, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2522 = buf2521[0]
del buf2521
buf2525 = torch.ops.aten.max_pool3d_with_indices.default(buf2524, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2526 = buf2525[0]
del buf2525
buf2529 = torch.ops.aten.max_pool3d_with_indices.default(buf2528, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2530 = buf2529[0]
del buf2529
buf2533 = torch.ops.aten.max_pool3d_with_indices.default(buf2532, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2534 = buf2533[0]
del buf2533
buf2536 = buf2532
del buf2532
buf2540 = buf2528
del buf2528
buf2544 = buf2524
del buf2524
buf2552 = buf2520
del buf2520
buf2556 = buf2518
del buf2518
buf2560 = buf2514
del buf2514
triton_poi_fused_minimum_neg_2[grid(256)](buf2526, buf2534, buf2530,
buf2536, buf2540, buf2544, buf2552, buf2556, buf2560, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2537 = torch.ops.aten.max_pool3d_with_indices.default(buf2536, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2536
buf2538 = buf2537[0]
del buf2537
buf2541 = torch.ops.aten.max_pool3d_with_indices.default(buf2540, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2540
buf2542 = buf2541[0]
del buf2541
buf2545 = torch.ops.aten.max_pool3d_with_indices.default(buf2544, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2544
buf2546 = buf2545[0]
del buf2545
buf2548 = buf2538
del buf2538
triton_poi_fused_minimum_neg_3[grid(256)](buf2548, buf2546, buf2542,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2549 = torch.ops.aten.max_pool3d_with_indices.default(buf2548, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2550 = buf2549[0]
del buf2549
buf2553 = torch.ops.aten.max_pool3d_with_indices.default(buf2552, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2554 = buf2553[0]
del buf2553
buf2557 = torch.ops.aten.max_pool3d_with_indices.default(buf2556, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2558 = buf2557[0]
del buf2557
buf2561 = torch.ops.aten.max_pool3d_with_indices.default(buf2560, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2562 = buf2561[0]
del buf2561
buf2564 = buf2560
del buf2560
buf2568 = buf2556
del buf2556
buf2572 = buf2552
del buf2552
buf2580 = buf2548
del buf2548
buf2584 = buf2546
del buf2546
buf2588 = buf2542
del buf2542
triton_poi_fused_minimum_neg_2[grid(256)](buf2554, buf2562, buf2558,
buf2564, buf2568, buf2572, buf2580, buf2584, buf2588, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2565 = torch.ops.aten.max_pool3d_with_indices.default(buf2564, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2564
buf2566 = buf2565[0]
del buf2565
buf2569 = torch.ops.aten.max_pool3d_with_indices.default(buf2568, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2568
buf2570 = buf2569[0]
del buf2569
buf2573 = torch.ops.aten.max_pool3d_with_indices.default(buf2572, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2572
buf2574 = buf2573[0]
del buf2573
buf2576 = buf2566
del buf2566
triton_poi_fused_minimum_neg_3[grid(256)](buf2576, buf2574, buf2570,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2577 = torch.ops.aten.max_pool3d_with_indices.default(buf2576, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2578 = buf2577[0]
del buf2577
buf2581 = torch.ops.aten.max_pool3d_with_indices.default(buf2580, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2582 = buf2581[0]
del buf2581
buf2585 = torch.ops.aten.max_pool3d_with_indices.default(buf2584, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2586 = buf2585[0]
del buf2585
buf2589 = torch.ops.aten.max_pool3d_with_indices.default(buf2588, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2590 = buf2589[0]
del buf2589
buf2592 = buf2588
del buf2588
buf2596 = buf2584
del buf2584
buf2600 = buf2580
del buf2580
buf2608 = buf2576
del buf2576
buf2612 = buf2574
del buf2574
buf2616 = buf2570
del buf2570
triton_poi_fused_minimum_neg_2[grid(256)](buf2582, buf2590, buf2586,
buf2592, buf2596, buf2600, buf2608, buf2612, buf2616, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2593 = torch.ops.aten.max_pool3d_with_indices.default(buf2592, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2592
buf2594 = buf2593[0]
del buf2593
buf2597 = torch.ops.aten.max_pool3d_with_indices.default(buf2596, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2596
buf2598 = buf2597[0]
del buf2597
buf2601 = torch.ops.aten.max_pool3d_with_indices.default(buf2600, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2600
buf2602 = buf2601[0]
del buf2601
buf2604 = buf2594
del buf2594
triton_poi_fused_minimum_neg_3[grid(256)](buf2604, buf2602, buf2598,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2605 = torch.ops.aten.max_pool3d_with_indices.default(buf2604, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2606 = buf2605[0]
del buf2605
buf2609 = torch.ops.aten.max_pool3d_with_indices.default(buf2608, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2610 = buf2609[0]
del buf2609
buf2613 = torch.ops.aten.max_pool3d_with_indices.default(buf2612, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2614 = buf2613[0]
del buf2613
buf2617 = torch.ops.aten.max_pool3d_with_indices.default(buf2616, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2618 = buf2617[0]
del buf2617
buf2620 = buf2616
del buf2616
buf2624 = buf2612
del buf2612
buf2628 = buf2608
del buf2608
buf2636 = buf2604
del buf2604
buf2640 = buf2602
del buf2602
buf2644 = buf2598
del buf2598
triton_poi_fused_minimum_neg_2[grid(256)](buf2610, buf2618, buf2614,
buf2620, buf2624, buf2628, buf2636, buf2640, buf2644, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2621 = torch.ops.aten.max_pool3d_with_indices.default(buf2620, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2620
buf2622 = buf2621[0]
del buf2621
buf2625 = torch.ops.aten.max_pool3d_with_indices.default(buf2624, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2624
buf2626 = buf2625[0]
del buf2625
buf2629 = torch.ops.aten.max_pool3d_with_indices.default(buf2628, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2628
buf2630 = buf2629[0]
del buf2629
buf2632 = buf2622
del buf2622
triton_poi_fused_minimum_neg_3[grid(256)](buf2632, buf2630, buf2626,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2633 = torch.ops.aten.max_pool3d_with_indices.default(buf2632, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2634 = buf2633[0]
del buf2633
buf2637 = torch.ops.aten.max_pool3d_with_indices.default(buf2636, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2638 = buf2637[0]
del buf2637
buf2641 = torch.ops.aten.max_pool3d_with_indices.default(buf2640, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2642 = buf2641[0]
del buf2641
buf2645 = torch.ops.aten.max_pool3d_with_indices.default(buf2644, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2646 = buf2645[0]
del buf2645
buf2648 = buf2644
del buf2644
buf2652 = buf2640
del buf2640
buf2656 = buf2636
del buf2636
buf2664 = buf2632
del buf2632
buf2668 = buf2630
del buf2630
buf2672 = buf2626
del buf2626
triton_poi_fused_minimum_neg_2[grid(256)](buf2638, buf2646, buf2642,
buf2648, buf2652, buf2656, buf2664, buf2668, buf2672, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2649 = torch.ops.aten.max_pool3d_with_indices.default(buf2648, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2648
buf2650 = buf2649[0]
del buf2649
buf2653 = torch.ops.aten.max_pool3d_with_indices.default(buf2652, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2652
buf2654 = buf2653[0]
del buf2653
buf2657 = torch.ops.aten.max_pool3d_with_indices.default(buf2656, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2656
buf2658 = buf2657[0]
del buf2657
buf2660 = buf2650
del buf2650
triton_poi_fused_minimum_neg_3[grid(256)](buf2660, buf2658, buf2654,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2661 = torch.ops.aten.max_pool3d_with_indices.default(buf2660, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2662 = buf2661[0]
del buf2661
buf2665 = torch.ops.aten.max_pool3d_with_indices.default(buf2664, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2666 = buf2665[0]
del buf2665
buf2669 = torch.ops.aten.max_pool3d_with_indices.default(buf2668, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2670 = buf2669[0]
del buf2669
buf2673 = torch.ops.aten.max_pool3d_with_indices.default(buf2672, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2674 = buf2673[0]
del buf2673
buf2676 = buf2672
del buf2672
buf2680 = buf2668
del buf2668
buf2684 = buf2664
del buf2664
buf2692 = buf2660
del buf2660
buf2696 = buf2658
del buf2658
buf2700 = buf2654
del buf2654
triton_poi_fused_minimum_neg_2[grid(256)](buf2666, buf2674, buf2670,
buf2676, buf2680, buf2684, buf2692, buf2696, buf2700, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2677 = torch.ops.aten.max_pool3d_with_indices.default(buf2676, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2676
buf2678 = buf2677[0]
del buf2677
buf2681 = torch.ops.aten.max_pool3d_with_indices.default(buf2680, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2680
buf2682 = buf2681[0]
del buf2681
buf2685 = torch.ops.aten.max_pool3d_with_indices.default(buf2684, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2684
buf2686 = buf2685[0]
del buf2685
buf2688 = buf2678
del buf2678
triton_poi_fused_minimum_neg_3[grid(256)](buf2688, buf2686, buf2682,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2689 = torch.ops.aten.max_pool3d_with_indices.default(buf2688, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2690 = buf2689[0]
del buf2689
buf2693 = torch.ops.aten.max_pool3d_with_indices.default(buf2692, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2694 = buf2693[0]
del buf2693
buf2697 = torch.ops.aten.max_pool3d_with_indices.default(buf2696, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2698 = buf2697[0]
del buf2697
buf2701 = torch.ops.aten.max_pool3d_with_indices.default(buf2700, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2702 = buf2701[0]
del buf2701
buf2704 = buf2700
del buf2700
buf2708 = buf2696
del buf2696
buf2712 = buf2692
del buf2692
buf2720 = buf2688
del buf2688
buf2724 = buf2686
del buf2686
buf2728 = buf2682
del buf2682
triton_poi_fused_minimum_neg_2[grid(256)](buf2694, buf2702, buf2698,
buf2704, buf2708, buf2712, buf2720, buf2724, buf2728, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2705 = torch.ops.aten.max_pool3d_with_indices.default(buf2704, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2704
buf2706 = buf2705[0]
del buf2705
buf2709 = torch.ops.aten.max_pool3d_with_indices.default(buf2708, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2708
buf2710 = buf2709[0]
del buf2709
buf2713 = torch.ops.aten.max_pool3d_with_indices.default(buf2712, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2712
buf2714 = buf2713[0]
del buf2713
buf2716 = buf2706
del buf2706
triton_poi_fused_minimum_neg_3[grid(256)](buf2716, buf2714, buf2710,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2717 = torch.ops.aten.max_pool3d_with_indices.default(buf2716, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2718 = buf2717[0]
del buf2717
buf2721 = torch.ops.aten.max_pool3d_with_indices.default(buf2720, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2722 = buf2721[0]
del buf2721
buf2725 = torch.ops.aten.max_pool3d_with_indices.default(buf2724, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2726 = buf2725[0]
del buf2725
buf2729 = torch.ops.aten.max_pool3d_with_indices.default(buf2728, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2730 = buf2729[0]
del buf2729
buf2732 = buf2728
del buf2728
buf2736 = buf2724
del buf2724
buf2740 = buf2720
del buf2720
buf2748 = buf2716
del buf2716
buf2752 = buf2714
del buf2714
buf2756 = buf2710
del buf2710
triton_poi_fused_minimum_neg_2[grid(256)](buf2722, buf2730, buf2726,
buf2732, buf2736, buf2740, buf2748, buf2752, buf2756, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2733 = torch.ops.aten.max_pool3d_with_indices.default(buf2732, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2732
buf2734 = buf2733[0]
del buf2733
buf2737 = torch.ops.aten.max_pool3d_with_indices.default(buf2736, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2736
buf2738 = buf2737[0]
del buf2737
buf2741 = torch.ops.aten.max_pool3d_with_indices.default(buf2740, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2740
buf2742 = buf2741[0]
del buf2741
buf2744 = buf2734
del buf2734
triton_poi_fused_minimum_neg_3[grid(256)](buf2744, buf2742, buf2738,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2745 = torch.ops.aten.max_pool3d_with_indices.default(buf2744, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2746 = buf2745[0]
del buf2745
buf2749 = torch.ops.aten.max_pool3d_with_indices.default(buf2748, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2750 = buf2749[0]
del buf2749
buf2753 = torch.ops.aten.max_pool3d_with_indices.default(buf2752, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2754 = buf2753[0]
del buf2753
buf2757 = torch.ops.aten.max_pool3d_with_indices.default(buf2756, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2758 = buf2757[0]
del buf2757
buf2760 = buf2756
del buf2756
buf2764 = buf2752
del buf2752
buf2768 = buf2748
del buf2748
buf2776 = buf2744
del buf2744
buf2780 = buf2742
del buf2742
buf2784 = buf2738
del buf2738
triton_poi_fused_minimum_neg_2[grid(256)](buf2750, buf2758, buf2754,
buf2760, buf2764, buf2768, buf2776, buf2780, buf2784, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2761 = torch.ops.aten.max_pool3d_with_indices.default(buf2760, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2760
buf2762 = buf2761[0]
del buf2761
buf2765 = torch.ops.aten.max_pool3d_with_indices.default(buf2764, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2764
buf2766 = buf2765[0]
del buf2765
buf2769 = torch.ops.aten.max_pool3d_with_indices.default(buf2768, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2768
buf2770 = buf2769[0]
del buf2769
buf2772 = buf2762
del buf2762
triton_poi_fused_minimum_neg_3[grid(256)](buf2772, buf2770, buf2766,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2773 = torch.ops.aten.max_pool3d_with_indices.default(buf2772, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
buf2774 = buf2773[0]
del buf2773
buf2777 = torch.ops.aten.max_pool3d_with_indices.default(buf2776, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2778 = buf2777[0]
del buf2777
buf2781 = torch.ops.aten.max_pool3d_with_indices.default(buf2780, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2782 = buf2781[0]
del buf2781
buf2785 = torch.ops.aten.max_pool3d_with_indices.default(buf2784, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2786 = buf2785[0]
del buf2785
buf2788 = buf2784
del buf2784
buf2792 = buf2780
del buf2780
buf2796 = buf2776
del buf2776
buf2804 = buf2772
del buf2772
buf2808 = buf2770
del buf2770
buf2812 = buf2766
del buf2766
triton_poi_fused_minimum_neg_2[grid(256)](buf2778, buf2786, buf2782,
buf2788, buf2792, buf2796, buf2804, buf2808, buf2812, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2789 = torch.ops.aten.max_pool3d_with_indices.default(buf2788, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2788
buf2790 = buf2789[0]
del buf2789
buf2793 = torch.ops.aten.max_pool3d_with_indices.default(buf2792, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2792
buf2794 = buf2793[0]
del buf2793
buf2797 = torch.ops.aten.max_pool3d_with_indices.default(buf2796, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2796
buf2798 = buf2797[0]
del buf2797
buf2800 = buf2790
del buf2790
triton_poi_fused_minimum_neg_3[grid(256)](buf2800, buf2798, buf2794,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2794
del buf2798
buf2801 = torch.ops.aten.max_pool3d_with_indices.default(buf2800, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2800
buf2802 = buf2801[0]
del buf2801
buf2805 = torch.ops.aten.max_pool3d_with_indices.default(buf2804, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
buf2806 = buf2805[0]
del buf2805
buf2809 = torch.ops.aten.max_pool3d_with_indices.default(buf2808, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
buf2810 = buf2809[0]
del buf2809
buf2813 = torch.ops.aten.max_pool3d_with_indices.default(buf2812, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
buf2814 = buf2813[0]
del buf2813
buf2816 = buf2812
del buf2812
buf2820 = buf2808
del buf2808
buf2824 = buf2804
del buf2804
triton_poi_fused_minimum_neg_4[grid(256)](buf2806, buf2814, buf2810,
buf2816, buf2820, buf2824, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf2817 = torch.ops.aten.max_pool3d_with_indices.default(buf2816, [
3, 1, 1], [1, 1, 1], [1, 0, 0])
del buf2816
buf2818 = buf2817[0]
del buf2817
buf2821 = torch.ops.aten.max_pool3d_with_indices.default(buf2820, [
1, 3, 1], [1, 1, 1], [0, 1, 0])
del buf2820
buf2822 = buf2821[0]
del buf2821
buf2825 = torch.ops.aten.max_pool3d_with_indices.default(buf2824, [
1, 1, 3], [1, 1, 1], [0, 0, 1])
del buf2824
buf2826 = buf2825[0]
del buf2825
buf2828 = buf2818
del buf2818
triton_poi_fused_minimum_neg_3[grid(256)](buf2828, buf2826, buf2822,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2822
del buf2826
buf2829 = torch.ops.aten.max_pool3d_with_indices.default(buf2828, [
3, 3, 3], [1, 1, 1], [1, 1, 1])
del buf2828
buf2830 = buf2829[0]
del buf2829
buf2832 = buf1430
del buf1430
buf2833 = buf1462
del buf1462
buf2834 = buf1490
del buf1490
buf2835 = buf1518
del buf1518
buf2836 = buf1546
del buf1546
buf2837 = buf1574
del buf1574
buf2838 = buf1602
del buf1602
buf2839 = buf1630
del buf1630
buf2840 = buf1658
del buf1658
buf2841 = buf1686
del buf1686
buf2842 = buf1714
del buf1714
buf2843 = buf1742
del buf1742
buf2844 = buf1770
del buf1770
buf2845 = buf1798
del buf1798
buf2846 = buf1826
del buf1826
buf2847 = buf1854
del buf1854
buf2848 = buf1882
del buf1882
buf2849 = buf1910
del buf1910
buf2850 = buf1938
del buf1938
buf2851 = buf1966
del buf1966
buf2852 = buf1994
del buf1994
buf2853 = buf2022
del buf2022
buf2854 = buf2050
del buf2050
buf2855 = buf2078
del buf2078
buf2856 = buf2106
del buf2106
buf2857 = buf2134
del buf2134
buf2858 = buf2162
del buf2162
buf2859 = buf2190
del buf2190
buf2860 = buf2218
del buf2218
buf2861 = buf2246
del buf2246
buf2862 = buf2274
del buf2274
buf2863 = buf2302
del buf2302
buf2864 = buf2330
del buf2330
buf2865 = buf2358
del buf2358
buf2866 = buf2386
del buf2386
buf2867 = buf2414
del buf2414
buf2868 = buf2442
del buf2442
buf2869 = buf2470
del buf2470
buf2870 = buf2498
del buf2498
buf2871 = buf2526
del buf2526
buf2872 = buf2554
del buf2554
buf2873 = buf2582
del buf2582
buf2874 = buf2610
del buf2610
buf2875 = buf2638
del buf2638
buf2876 = buf2666
del buf2666
buf2877 = buf2694
del buf2694
buf2878 = buf2722
del buf2722
buf2879 = buf2750
del buf2750
buf2880 = buf2778
del buf2778
buf2881 = buf2806
del buf2806
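# Fused epilogue (assumption: this folds the per-iteration deltas into the
# final soft skeleton of the second input arg1_1 via the
# add/minimum/mul/neg/relu/sub chain, leaving the result in buf2881).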
triton_poi_fused_add_minimum_mul_neg_relu_sub_5[grid(256)](buf2832,
buf2833, buf2834, buf2835, buf2836, buf2837, buf2838, buf2839,
buf2840, buf2841, buf2842, buf2843, buf2844, buf2845, buf2846,
buf2847, buf2848, buf2849, buf2850, buf2851, buf2852, buf2853,
buf2854, buf2855, buf2856, buf2857, buf2858, buf2859, buf2860,
buf2861, buf2862, buf2863, buf2864, buf2865, buf2866, buf2867,
buf2868, buf2869, buf2870, buf2871, buf2872, buf2873, buf2874,
buf2875, buf2876, buf2877, buf2878, buf2879, buf2880, buf2881,
arg1_1, buf1434, buf1442, buf1438, buf1458, buf1470, buf1466,
buf1486, buf1498, buf1494, buf1514, buf1526, buf1522, buf1542,
buf1554, buf1550, buf1570, buf1582, buf1578, buf1598, buf1610,
buf1606, buf1626, buf1638, buf1634, buf1654, buf1666, buf1662,
buf1682, buf1694, buf1690, buf1710, buf1722, buf1718, buf1738,
buf1750, buf1746, buf1766, buf1778, buf1774, buf1794, buf1806,
buf1802, buf1822, buf1834, buf1830, buf1850, buf1862, buf1858,
buf1878, buf1890, buf1886, buf1906, buf1918, buf1914, buf1934,
buf1946, buf1942, buf1962, buf1974, buf1970, buf1990, buf2002,
buf1998, buf2018, buf2030, buf2026, buf2046, buf2058, buf2054,
buf2074, buf2086, buf2082, buf2102, buf2114, buf2110, buf2130,
buf2142, buf2138, buf2158, buf2170, buf2166, buf2186, buf2198,
buf2194, buf2214, buf2226, buf2222, buf2242, buf2254, buf2250,
buf2270, buf2282, buf2278, buf2298, buf2310, buf2306, buf2326,
buf2338, buf2334, buf2354, buf2366, buf2362, buf2382, buf2394,
buf2390, buf2410, buf2422, buf2418, buf2438, buf2450, buf2446,
buf2466, buf2478, buf2474, buf2494, buf2506, buf2502, buf2522,
buf2534, buf2530, buf2550, buf2562, buf2558, buf2578, buf2590,
buf2586, buf2606, buf2618, buf2614, buf2634, buf2646, buf2642,
buf2662, buf2674, buf2670, buf2690, buf2702, buf2698, buf2718,
buf2730, buf2726, buf2746, buf2758, buf2754, buf2774, buf2786,
buf2782, buf2802, buf2814, buf2810, buf2830, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
del buf1434
del buf1438
del buf1442
del buf1458
del buf1466
del buf1470
del buf1486
del buf1494
del buf1498
del buf1514
del buf1522
del buf1526
del buf1542
del buf1550
del buf1554
del buf1570
del buf1578
del buf1582
del buf1598
del buf1606
del buf1610
del buf1626
del buf1634
del buf1638
del buf1654
del buf1662
del buf1666
del buf1682
del buf1690
del buf1694
del buf1710
del buf1718
del buf1722
del buf1738
del buf1746
del buf1750
del buf1766
del buf1774
del buf1778
del buf1794
del buf1802
del buf1806
del buf1822
del buf1830
del buf1834
del buf1850
del buf1858
del buf1862
del buf1878
del buf1886
del buf1890
del buf1906
del buf1914
del buf1918
del buf1934
del buf1942
del buf1946
del buf1962
del buf1970
del buf1974
del buf1990
del buf1998
del buf2002
del buf2018
del buf2026
del buf2030
del buf2046
del buf2054
del buf2058
del buf2074
del buf2082
del buf2086
del buf2102
del buf2110
del buf2114
del buf2130
del buf2138
del buf2142
del buf2158
del buf2166
del buf2170
del buf2186
del buf2194
del buf2198
del buf2214
del buf2222
del buf2226
del buf2242
del buf2250
del buf2254
del buf2270
del buf2278
del buf2282
del buf2298
del buf2306
del buf2310
del buf2326
del buf2334
del buf2338
del buf2354
del buf2362
del buf2366
del buf2382
del buf2390
del buf2394
del buf2410
del buf2418
del buf2422
del buf2438
del buf2446
del buf2450
del buf2466
del buf2474
del buf2478
del buf2494
del buf2502
del buf2506
del buf2522
del buf2530
del buf2534
del buf2550
del buf2558
del buf2562
del buf2578
del buf2586
del buf2590
del buf2606
del buf2614
del buf2618
del buf2634
del buf2642
del buf2646
del buf2662
del buf2670
del buf2674
del buf2690
del buf2698
del buf2702
del buf2718
del buf2726
del buf2730
del buf2746
del buf2754
del buf2758
del buf2774
del buf2782
del buf2786
del buf2802
del buf2810
del buf2814
del buf2830
del buf2832
del buf2833
del buf2834
del buf2835
del buf2836
del buf2837
del buf2838
del buf2839
del buf2840
del buf2841
del buf2842
del buf2843
del buf2844
del buf2845
del buf2846
del buf2847
del buf2848
del buf2849
del buf2850
del buf2851
del buf2852
del buf2853
del buf2854
del buf2855
del buf2856
del buf2857
del buf2858
del buf2859
del buf2860
del buf2861
del buf2862
del buf2863
del buf2864
del buf2865
del buf2866
del buf2867
del buf2868
del buf2869
del buf2870
del buf2871
del buf2872
del buf2873
del buf2874
del buf2875
del buf2876
del buf2877
del buf2878
del buf2879
del buf2880
buf2882 = buf14
del buf14
buf2883 = buf2882
del buf2882
buf2884 = buf2883
del buf2883
buf2885 = buf102
del buf102
buf2886 = buf130
del buf130
buf2887 = buf158
del buf158
buf2888 = buf186
del buf186
buf2889 = buf214
del buf214
buf2890 = buf242
del buf242
buf2891 = buf270
del buf270
buf2892 = buf2891
del buf2891
buf2893 = buf2892
del buf2892
buf2894 = buf2893
del buf2893
buf2895 = buf2894
del buf2894
buf2896 = buf2895
del buf2895
buf2897 = buf2896
del buf2896
buf2898 = buf2897
del buf2897
buf2899 = buf2898
del buf2898
buf2900 = buf2899
del buf2899
buf2901 = buf2900
del buf2900
buf2902 = buf2901
del buf2901
buf2903 = buf2902
del buf2902
buf2904 = buf2903
del buf2903
buf2905 = buf2904
del buf2904
buf2906 = buf2905
del buf2905
buf2907 = buf2906
del buf2906
buf2908 = buf2907
del buf2907
buf2909 = buf2908
del buf2908
buf2910 = buf2909
del buf2909
buf2911 = buf2910
del buf2910
buf2912 = buf2911
del buf2911
buf2913 = buf2912
del buf2912
buf2914 = buf2913
del buf2913
buf2915 = buf2914
del buf2914
buf2916 = buf2915
del buf2915
buf2917 = buf1002
del buf1002
buf2918 = buf1026
del buf1026
buf2919 = buf1054
del buf1054
buf2920 = buf1082
del buf1082
buf2921 = buf1110
del buf1110
buf2922 = buf1138
del buf1138
buf2923 = buf1166
del buf1166
buf2924 = buf1194
del buf1194
buf2925 = buf1222
del buf1222
buf2926 = buf1250
del buf1250
buf2927 = buf1278
del buf1278
buf2928 = buf1306
del buf1306
buf2929 = buf1334
del buf1334
buf2930 = buf1362
del buf1362
buf2931 = buf1390
del buf1390
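# Same fused skeleton accumulation for the first input arg0_1 (assumption:
# its result is left in buf2931 and returned alongside buf2881 below).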
triton_poi_fused_add_minimum_mul_neg_relu_sub_6[grid(256)](buf2884,
buf2885, buf2886, buf2887, buf2888, buf2889, buf2890, buf2916,
buf2917, buf2918, buf2919, buf2920, buf2921, buf2922, buf2923,
buf2924, buf2925, buf2926, buf2927, buf2928, buf2929, buf2930,
buf2931, arg0_1, buf18, buf26, buf22, buf42, buf46, buf54,
buf50, buf70, buf74, buf82, buf78, buf98, buf110, buf106,
buf126, buf138, buf134, buf154, buf166, buf162, buf182, buf194,
buf190, buf210, buf222, buf218, buf238, buf250, buf246, buf266,
buf278, buf274, buf294, buf298, buf306, buf302, buf322, buf326,
buf334, buf330, buf350, buf354, buf362, buf358, buf378, buf382,
buf390, buf386, buf406, buf410, buf418, buf414, buf434, buf438,
buf446, buf442, buf462, buf466, buf474, buf470, buf490, buf494,
buf502, buf498, buf518, buf522, buf530, buf526, buf546, buf550,
buf558, buf554, buf574, buf578, buf586, buf582, buf602, buf606,
buf614, buf610, buf630, buf634, buf642, buf638, buf658, buf662,
buf670, buf666, buf686, buf690, buf698, buf694, buf714, buf718,
buf726, buf722, buf742, buf746, buf754, buf750, buf770, buf774,
buf782, buf778, buf798, buf802, buf810, buf806, buf826, buf830,
buf838, buf834, buf854, buf858, buf866, buf862, buf882, buf886,
buf894, buf890, buf910, buf914, buf922, buf918, buf938, buf942,
buf950, buf946, buf966, buf970, buf978, buf974, buf994, buf998,
buf1006, buf1022, buf1034, buf1030, buf1050, buf1062, buf1058,
buf1078, buf1090, buf1086, buf1106, buf1118, buf1114, buf1134,
buf1146, buf1142, buf1162, buf1174, buf1170, buf1190, buf1202,
buf1198, buf1218, buf1230, buf1226, buf1246, buf1258, buf1254,
buf1274, buf1286, buf1282, buf1302, buf1314, buf1310, buf1330,
buf1342, buf1338, buf1358, buf1370, buf1366, buf1386, buf1398,
buf1394, buf1414, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1006
del buf1022
del buf1030
del buf1034
del buf1050
del buf1058
del buf106
del buf1062
del buf1078
del buf1086
del buf1090
del buf110
del buf1106
del buf1114
del buf1118
del buf1134
del buf1142
del buf1146
del buf1162
del buf1170
del buf1174
del buf1190
del buf1198
del buf1202
del buf1218
del buf1226
del buf1230
del buf1246
del buf1254
del buf1258
del buf126
del buf1274
del buf1282
del buf1286
del buf1302
del buf1310
del buf1314
del buf1330
del buf1338
del buf134
del buf1342
del buf1358
del buf1366
del buf1370
del buf138
del buf1386
del buf1394
del buf1398
del buf1414
del buf154
del buf162
del buf166
del buf18
del buf182
del buf190
del buf194
del buf210
del buf218
del buf22
del buf222
del buf238
del buf246
del buf250
del buf26
del buf266
del buf274
del buf278
del buf2884
del buf2885
del buf2886
del buf2887
del buf2888
del buf2889
del buf2890
del buf2916
del buf2917
del buf2918
del buf2919
del buf2920
del buf2921
del buf2922
del buf2923
del buf2924
del buf2925
del buf2926
del buf2927
del buf2928
del buf2929
del buf2930
del buf294
del buf298
del buf302
del buf306
del buf322
del buf326
del buf330
del buf334
del buf350
del buf354
del buf358
del buf362
del buf378
del buf382
del buf386
del buf390
del buf406
del buf410
del buf414
del buf418
del buf42
del buf434
del buf438
del buf442
del buf446
del buf46
del buf462
del buf466
del buf470
del buf474
del buf490
del buf494
del buf498
del buf50
del buf502
del buf518
del buf522
del buf526
del buf530
del buf54
del buf546
del buf550
del buf554
del buf558
del buf574
del buf578
del buf582
del buf586
del buf602
del buf606
del buf610
del buf614
del buf630
del buf634
del buf638
del buf642
del buf658
del buf662
del buf666
del buf670
del buf686
del buf690
del buf694
del buf698
del buf70
del buf714
del buf718
del buf722
del buf726
del buf74
del buf742
del buf746
del buf750
del buf754
del buf770
del buf774
del buf778
del buf78
del buf782
del buf798
del buf802
del buf806
del buf810
del buf82
del buf826
del buf830
del buf834
del buf838
del buf854
del buf858
del buf862
del buf866
del buf882
del buf886
del buf890
del buf894
del buf910
del buf914
del buf918
del buf922
del buf938
del buf942
del buf946
del buf950
del buf966
del buf970
del buf974
del buf978
del buf98
del buf994
del buf998
return buf2881, buf2931
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
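# Example (illustrative): summing a (b, c, x, y) tensor over the spatial
# axes keeps per-sample, per-class totals:
#   t = torch.ones(2, 3, 4, 4)
#   sum_tensor(t, axes=(2, 3)).shape  # -> torch.Size([2, 3])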
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes: axes to sum over; pass an empty tuple () for no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp,
dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp,
dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn,
dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn,
dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
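# Example (illustrative; `smooth` is a hypothetical smoothing constant):
# the returned counts plug directly into a soft Dice score:
#   tp, fp, fn, _ = get_tp_fp_fn_tn(net_output, gt)
#   dice = (2 * tp + smooth) / (2 * tp + fp + fn + smooth)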
def soft_erode(I):
p1 = -F.max_pool3d(-I, (3, 1, 1), (1, 1, 1), (1, 0, 0))
p2 = -F.max_pool3d(-I, (1, 3, 1), (1, 1, 1), (0, 1, 0))
p3 = -F.max_pool3d(-I, (1, 1, 3), (1, 1, 1), (0, 0, 1))
return torch.min(torch.min(p1, p3), p2)
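# Note: min-pooling has no native op, so soft_erode relies on the identity
# min(x) = -max(-x), taking the voxel-wise minimum of three 1-D erosions
# along the depth, height and width axes.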
def soft_dilate(I):
return F.max_pool3d(I, (3, 3, 3), (1, 1, 1), (1, 1, 1))
def soft_open(I):
return soft_dilate(soft_erode(I))
def soft_skel(img, k=50):
img1 = soft_open(img)
skel = F.relu(img - img1)
for _ in range(k):
img = soft_erode(img)
img1 = soft_open(img)
delta = F.relu(img - img1)
skel = skel + F.relu(delta - skel * delta)
if torch.cuda.is_available():
del img1
del delta
return skel
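# Sketch (assumes the clDice formulation of Shit et al.): with soft
# skeletons S(.) from soft_skel, the centerline Dice is
#   tprec = |S(y_pred) * y_true| / |S(y_pred)|
#   tsens = |S(y_true) * y_pred| / |S(y_true)|
#   clDice = 2 * tprec * tsens / (tprec + tsens)
# and the loss is 1 - clDice.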
class SoftClDiceLossNew(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, k=2):
"""
"""
super(SoftClDiceLossNew, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.k = k
def softCenterline(self, I):
maxpool = nn.MaxPool3d(3, stride=1, padding=1)
relu = nn.ReLU()
Ip = maxpool(-maxpool(-I))
cl = relu(I - Ip)
for _ in range(self.k):
I = -maxpool(-I)
Ip = maxpool(-maxpool(-I))
cl = cl + cl * relu(I - Ip)
return cl
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| CamilaGL/nnUNet | SoftClDiceLoss | false | 1,175 | ["Apache-2.0"] | 0 | 471ab73a6e4f67fc72d476183b5344be4cccf7ca | https://github.com/CamilaGL/nnUNet/tree/471ab73a6e4f67fc72d476183b5344be4cccf7ca | import torch
import numpy as np
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes: axes to sum over; pass an empty tuple () for no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp,
dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp,
dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn,
dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn,
dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
def soft_erode(I):
p1 = -F.max_pool3d(-I, (3, 1, 1), (1, 1, 1), (1, 0, 0))
p2 = -F.max_pool3d(-I, (1, 3, 1), (1, 1, 1), (0, 1, 0))
p3 = -F.max_pool3d(-I, (1, 1, 3), (1, 1, 1), (0, 0, 1))
return torch.min(torch.min(p1, p3), p2)
def soft_dilate(I):
return F.max_pool3d(I, (3, 3, 3), (1, 1, 1), (1, 1, 1))
def soft_open(I):
return soft_dilate(soft_erode(I))
def soft_skel(img, k=50):
img1 = soft_open(img)
skel = F.relu(img - img1)
for _ in range(k):
img = soft_erode(img)
img1 = soft_open(img)
delta = F.relu(img - img1)
skel = skel + F.relu(delta - skel * delta)
if torch.cuda.is_available():
del img1
del delta
return skel
class Model(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, k=2):
"""
"""
super().__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.k = k
def softCenterline(self, I):
maxpool = nn.MaxPool3d(3, stride=1, padding=1)
relu = nn.ReLU()
Ip = maxpool(-maxpool(-I))
cl = relu(I - Ip)
for _ in range(self.k):
I = -maxpool(-I)
Ip = maxpool(-maxpool(-I))
cl = cl + cl * relu(I - Ip)
return cl
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(r
# ... truncated (>4000 chars) for memory efficiency |
ConvLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/gk/cgkf6erwdwyiy4xdm222c6rkwywjqetjz7vncozpnvl25mi55bq2.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 3136
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_out_ptr0 + (x2 + (3136*y3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + (3136*y3)), tmp4, xmask)
tl.store(out_ptr0 + (y0 + (256*x2) + (802816*y1)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 1, 9, 9), (81, 81, 9, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 56, 56), (802816, 3136, 56, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 256, 56, 56), (802816, 1, 14336, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 1024, 3136, grid=grid(1024, 3136), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 1, 9, 9), (81, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
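# Editorial reference (not generated output): the fused kernel computes
# relu(conv2d(x, w, b)) in place and also stores the boolean mask (out <= 0)
# that aten.threshold_backward consumes during the backward pass. Eagerly:
def eager_conv_relu_reference(x, weight, bias):
    out = torch.relu(torch.nn.functional.conv2d(x, weight, bias, stride=(1, 1)))
    return out, out <= 0  # activation and its threshold_backward mask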
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvLayer(nn.Module):
"""A Convolutional Layer"""
def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
return F.relu(self.conv(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
xnumel = 3136
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_out_ptr0 + (x2 + 3136 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 3136 * y3), tmp4, xmask)
tl.store(out_ptr0 + (y0 + 256 * x2 + 802816 * y1), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 1, 9, 9), (81, 81, 9, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 56, 56), (802816, 3136, 56, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 256, 56, 56), (802816, 1, 14336, 256),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(1024, 3136)
](buf1, primals_2, buf2, 1024, 3136, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class ConvLayerNew(nn.Module):
"""A Convolutional Layer"""
def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1
):
super(ConvLayerNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| VIVelev/capsnets | ConvLayer | false | 1,176 | [
"MIT"
] | 0 | dca4bfcd4007977a6bc3534a4676880326fcf94a | https://github.com/VIVelev/capsnets/tree/dca4bfcd4007977a6bc3534a4676880326fcf94a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""A Convolutional Layer"""
def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1
):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
return F.relu(self.conv(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/se/csevsploqhlaafqt4umuu5feml3ses7qj2jkxsyoy7kfy5rlinsl.py
# Topologically Sorted Source Nodes: [scores_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores_2 => amax, clone, exp, sub, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_11,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp3
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = tmp10 * tmp3
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp0 * tmp13
tmp15 = tmp14 * tmp3
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + (x3), tmp16, xmask)
tl.store(out_ptr1 + (x3), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/dv/cdvddpa3xpcvcmerzvthlyccsihjoyjqxp73clitp4k7k44dwkj6.py
# Topologically Sorted Source Nodes: [scores_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores_2 => amax, clone, div_1, exp, sub, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_11,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp9, xmask)
''', device_str='cuda')
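# Reading aid (editorial note, not generated output): softmax over the scores
# is split across the two kernels above -- kernel 0 emits the row max (tmp16)
# and the sum of shifted exponentials (tmp27); kernel 1 re-reads the scores
# and writes exp(x - max) / sum. The 0.5 factor is 1/sqrt(embed_size) for
# embed_size=4, constant-folded into both passes. Eager equivalent:
def softmax_two_pass_reference(scores):
    m = scores.max(dim=-1, keepdim=True).values
    e = (scores - m).exp()
    return e / e.sum(dim=-1, keepdim=True)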
# kernel path: runs/run_shard_6/inductor_cache/7k/c7kkxqo5r65gqykuvge3exgf3trgxmm4raf7gypitw4ynuylbeao.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attention => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_17,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/q4/cq4lrbjfvbivmpg2zkxhkatw7yc2rqarfj625cpqjlxqgfutfyet.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_11), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
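# Editorial note: triton_poi_fused_add_4 is only the output-projection bias
# add, applied in place with the (4,) bias broadcast over the last dim of the
# (4, 4, 4) result. Eager equivalent:
def add_bias_reference(out, bias):
    return out + bias  # broadcasts over the trailing embed dimension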
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_4
del primals_5
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_6
del primals_7
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [scores_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf2, primals_9, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf10, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
return (buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
"""Multi-headed Attention for input Query, Key, Value
    Multi-headed Attention is a module for attention mechanisms that runs the attention computation several times in
    parallel; the multiple outputs are then concatenated and linearly transformed
Args:
embed_size (int): Max embedding size
num_heads (int): Number of heads in multi-headed attention; Number of splits in the embedding size
        dropout (float, optional): Fraction of dropout to apply, in the range 0 <= dropout <= 1
        batch_dim (int, optional): The dimension that indexes the batch
"""
def __init__(self, embed_size: 'int', num_heads: 'int', dropout:
'float'=0.2, batch_dim: 'int'=0):
super(MultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.num_heads = num_heads
self.dropout = dropout
self.batch_dim = batch_dim
self.dropout_layer = nn.Dropout(dropout)
self.head_size = self.embed_size // self.num_heads
assert self.head_size * self.num_heads == self.embed_size, 'Heads cannot split Embedding size equally'
self.Q = nn.Linear(self.embed_size, self.embed_size)
self.K = nn.Linear(self.embed_size, self.embed_size)
self.V = nn.Linear(self.embed_size, self.embed_size)
self.linear = nn.Linear(self.embed_size, self.embed_size)
def forward(self, q, k, v, mask=None):
q_batch_size, q_seq_len, _q_embed_size = q.size()
k_batch_size, k_seq_len, _k_embed_size = k.size()
v_batch_size, v_seq_len, _v_embed_size = v.size()
q = self.Q(q).reshape(q_batch_size, q_seq_len, self.num_heads, self
.head_size)
k = self.K(k).reshape(k_batch_size, k_seq_len, self.num_heads, self
.head_size)
v = self.V(v).reshape(v_batch_size, v_seq_len, self.num_heads, self
.head_size)
attention = self.attention(q, k, v, mask=mask)
concatenated = attention.reshape(v_batch_size, -1, self.embed_size)
out = self.linear(concatenated)
return out
def attention(self, q, k, v, mask=None):
scores = torch.einsum('bqhe,bkhe->bhqk', [q, k])
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores /= math.sqrt(self.embed_size)
scores = F.softmax(scores, dim=-1)
scores = self.dropout_layer(scores)
attention = torch.einsum('bhql,blhd->bqhd', [scores, v])
return attention
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'embed_size': 4, 'num_heads': 4}]
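# Editorial smoke test (assumes the defaults above): with embed_size=4 and
# num_heads=4, head_size is 1, 'bqhe,bkhe->bhqk' yields per-head 4x4 score
# maps, and the output keeps the (batch, seq, embed) shape.
def multi_head_attention_demo():
    module = MultiHeadAttention(embed_size=4, num_heads=4)
    q, k, v = get_inputs()
    out = module(q, k, v)
    assert out.shape == (4, 4, 4)
    return out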
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp3
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = tmp10 * tmp3
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp0 * tmp13
tmp15 = tmp14 * tmp3
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_4
del primals_5
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_6
del primals_7
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, buf3, buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf4
triton_poi_fused_clone_2[grid(16, 4)](buf2, primals_9, buf6, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_4[grid(64)](buf10, primals_11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_11
return buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0)
class MultiHeadAttentionNew(nn.Module):
"""Multi-headed Attention for input Query, Key, Value
    Multi-headed Attention is a module for attention mechanisms that runs the attention computation several times in
    parallel; the multiple outputs are then concatenated and linearly transformed
Args:
embed_size (int): Max embedding size
num_heads (int): Number of heads in multi-headed attention; Number of splits in the embedding size
        dropout (float, optional): Fraction of dropout to apply, in the range 0 <= dropout <= 1
        batch_dim (int, optional): The dimension that indexes the batch
"""
def __init__(self, embed_size: 'int', num_heads: 'int', dropout:
'float'=0.2, batch_dim: 'int'=0):
super(MultiHeadAttentionNew, self).__init__()
self.embed_size = embed_size
self.num_heads = num_heads
self.dropout = dropout
self.batch_dim = batch_dim
self.dropout_layer = nn.Dropout(dropout)
self.head_size = self.embed_size // self.num_heads
assert self.head_size * self.num_heads == self.embed_size, 'Heads cannot split Embedding size equally'
self.Q = nn.Linear(self.embed_size, self.embed_size)
self.K = nn.Linear(self.embed_size, self.embed_size)
self.V = nn.Linear(self.embed_size, self.embed_size)
self.linear = nn.Linear(self.embed_size, self.embed_size)
def attention(self, q, k, v, mask=None):
scores = torch.einsum('bqhe,bkhe->bhqk', [q, k])
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores /= math.sqrt(self.embed_size)
scores = F.softmax(scores, dim=-1)
scores = self.dropout_layer(scores)
attention = torch.einsum('bhql,blhd->bqhd', [scores, v])
return attention
def forward(self, input_0, input_1, input_2):
primals_4 = self.Q.weight
primals_5 = self.Q.bias
primals_6 = self.K.weight
primals_7 = self.K.bias
primals_8 = self.V.weight
primals_9 = self.V.bias
primals_10 = self.linear.weight
primals_11 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| UdbhavPrasad072300/Kaggle-Competition-Templates | MultiHeadAttention | false | 1,177 | [
"MIT"
] | 0 | f3c93ff60ae33af9b6c6d79d30c5099eb250396c | https://github.com/UdbhavPrasad072300/Kaggle-Competition-Templates/tree/f3c93ff60ae33af9b6c6d79d30c5099eb250396c | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Multi-headed Attention for input Query, Key, Value
    Multi-headed Attention is a module for attention mechanisms that runs the attention computation several times in
    parallel; the multiple outputs are then concatenated and linearly transformed
Args:
embed_size (int): Max embedding size
num_heads (int): Number of heads in multi-headed attention; Number of splits in the embedding size
        dropout (float, optional): Fraction of dropout to apply, in the range 0 <= dropout <= 1
        batch_dim (int, optional): The dimension that indexes the batch
"""
def __init__(self, embed_size: 'int', num_heads: 'int', dropout:
'float'=0.2, batch_dim: 'int'=0):
super().__init__()
self.embed_size = embed_size
self.num_heads = num_heads
self.dropout = dropout
self.batch_dim = batch_dim
self.dropout_layer = nn.Dropout(dropout)
self.head_size = self.embed_size // self.num_heads
assert self.head_size * self.num_heads == self.embed_size, 'Heads cannot split Embedding size equally'
self.Q = nn.Linear(self.embed_size, self.embed_size)
self.K = nn.Linear(self.embed_size, self.embed_size)
self.V = nn.Linear(self.embed_size, self.embed_size)
self.linear = nn.Linear(self.embed_size, self.embed_size)
def forward(self, q, k, v, mask=None):
q_batch_size, q_seq_len, _q_embed_size = q.size()
k_batch_size, k_seq_len, _k_embed_size = k.size()
v_batch_size, v_seq_len, _v_embed_size = v.size()
q = self.Q(q).reshape(q_batch_size, q_seq_len, self.num_heads, self
.head_size)
k = self.K(k).reshape(k_batch_size, k_seq_len, self.num_heads, self
.head_size)
v = self.V(v).reshape(v_batch_size, v_seq_len, self.num_heads, self
.head_size)
attention = self.attention(q, k, v, mask=mask)
concatenated = attention.reshape(v_batch_size, -1, self.embed_size)
out = self.linear(concatenated)
return out
def attention(self, q, k, v, mask=None):
scores = torch.einsum('bqhe,bkhe->bhqk', [q, k])
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores /= math.sqrt(self.embed_size)
scores = F.softmax(scores, dim=-1)
scores = self.dropout_layer(scores)
attention = torch.einsum('bhql,blhd->bqhd', [scores, v])
return attention
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [4, 4]
|
WeighedL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/wa/cwattm4dorgdscqelaayil4zrzchrilandgf44svxdwhiritbmq3.py
# Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.sub, aten.abs, aten.mul, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, sub
# mean => mean
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {})
triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.sub, aten.abs, aten.mul, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
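# Editorial note: the scalar weight (4) from get_init_inputs is constant-folded
# into the kernel as tmp4 = 4.0, so a different `weights` value would trigger a
# fresh compile. Eager equivalent of the fused reduction:
def weighted_l1_reference(pred, target, weights=4.0):
    return ((pred - target).abs() * weights).mean()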
| import torch
from torch import Tensor
from torch.nn import L1Loss
class WeighedL1Loss(L1Loss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import L1Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeighedL1LossNew(L1Loss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| UT-ADL/lidar-as-camera | WeighedL1Loss | false | 1,178 | [
"Apache-2.0"
] | 0 | daccb2ae21b4899ecfd8611b7a27f91681617383 | https://github.com/UT-ADL/lidar-as-camera/tree/daccb2ae21b4899ecfd8611b7a27f91681617383 | import torch
from torch import Tensor
from torch.nn import L1Loss
class Model(L1Loss):
def __init__(self, weights):
super().__init__(reduction='none')
self.weights = weights
def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor:
loss = super().forward(input, target)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaskL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6q/c6qceyfxvxieybhxdenyuyjjzaujhzaa7uy3yv5ky5eslmfzfunh.py
# Topologically Sorted Source Nodes: [sub, abs_1, mul, sum_1, sum_2, add, loss], Original ATen: [aten.sub, aten.abs, aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add => add
# loss => div
# mul => mul
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg2_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
triton_per_fused_abs_add_div_mul_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp4 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp4, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 1e-06
tmp13 = tmp11 + tmp12
tmp14 = tmp8 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, mul, sum_1, sum_2, add, loss], Original ATen: [aten.sub, aten.abs, aten.mul, aten.sum, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_mul_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class MaskL1Loss(nn.Module):
def __init__(self, eps=1e-06):
super(MaskL1Loss, self).__init__()
self.eps = eps
def forward(self, pred: 'torch.Tensor', gt, mask):
loss = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + self.eps)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
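# Sanity-check sketch (not part of the generated module): the loss above is a
# mask-weighted mean of the absolute error, so it can be cross-checked against
# an explicit reference implementation. The helper name below is illustrative.
def _masked_l1_reference(pred, gt, mask, eps=1e-06):
    weighted = torch.abs(pred - gt) * mask
    return weighted.sum() / (mask.sum() + eps)
_p, _g, _m = get_inputs()
assert torch.allclose(MaskL1Loss()(_p, _g, _m), _masked_l1_reference(_p, _g, _m))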
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mul_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp4 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp4, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 1e-06
tmp13 = tmp11 + tmp12
tmp14 = tmp8 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mul_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class MaskL1LossNew(nn.Module):
def __init__(self, eps=1e-06):
super(MaskL1LossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
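# Equivalence sketch (requires a CUDA device, since `call` launches the fused
# Triton kernel; tensor names are illustrative):
#
#     pred = torch.rand(4, 4, 4, 4, device='cuda')
#     gt = torch.rand(4, 4, 4, 4, device='cuda')
#     mask = torch.rand(4, 4, 4, 4, device='cuda')
#     eager = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + 1e-06)
#     assert torch.allclose(MaskL1LossNew()(pred, gt, mask), eager, atol=1e-5)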
| Vivianyzw/Dual.DBNet.pytorch | MaskL1Loss | false | 1,179 | [
"Apache-2.0",
"MIT"
] | 0 | 19d823ed7c05076c087a3f7ad1127c71c1c0d692 | https://github.com/Vivianyzw/Dual.DBNet.pytorch/tree/19d823ed7c05076c087a3f7ad1127c71c1c0d692 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, eps=1e-06):
super().__init__()
self.eps = eps
def forward(self, pred: 'torch.Tensor', gt, mask):
loss = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + self.eps)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
HardSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/wc/cwcfnwuommyu7iwqk7p3drqzze6t2tbhapxpjj7cjtdj7vrmgtpp.py
# Topologically Sorted Source Nodes: [mul, x, neg, result, neg_1, result_1], Original ATen: [aten.mul, aten.add, aten.neg, aten.threshold]
# Source node to ATen node mapping:
# mul => mul
# neg => neg
# neg_1 => neg_1
# result => full_default, le, where
# result_1 => full_default_1, le_1, where_1
# x => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg, -1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %neg), kwargs = {})
# %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%where,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le_1, %full_default_1, %neg_1), kwargs = {})
triton_poi_fused_add_mul_neg_threshold_0 = async_compile.triton('triton_poi_fused_add_mul_neg_threshold_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_neg_threshold_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_neg_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = -tmp4
tmp6 = -1.0
tmp7 = tmp5 <= tmp6
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = -tmp8
tmp10 = 0.0
tmp11 = tmp9 <= tmp10
tmp12 = tl.where(tmp11, tmp10, tmp9)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, x, neg, result, neg_1, result_1], Original ATen: [aten.mul, aten.add, aten.neg, aten.threshold]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_neg_threshold_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class HardSigmoid(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
x = self.slope * x + self.offset
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
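# Why the two thresholds implement a hard sigmoid (an illustrative note, not
# repo code): F.threshold(v, t, c) keeps v where v > t and substitutes c
# elsewhere, so the first call clamps slope * x + offset from above at 1 (via
# the negation) and the second clamps it from below at 0; the pair is
# equivalent to torch.clamp(slope * x + offset, 0.0, 1.0).
_x = torch.randn(1000)
assert torch.allclose(HardSigmoid()(_x), torch.clamp(0.2 * _x + 0.5, 0.0, 1.0))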
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_neg_threshold_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = -tmp4
tmp6 = -1.0
tmp7 = tmp5 <= tmp6
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = -tmp8
tmp10 = 0.0
tmp11 = tmp9 <= tmp10
tmp12 = tl.where(tmp11, tmp10, tmp9)
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_neg_threshold_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HardSigmoidNew(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
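# Equivalence sketch (requires CUDA): the fused kernel computes the same
# clamp(0.2 * x + 0.5, 0, 1) as the eager double-threshold formulation.
#
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     assert torch.allclose(HardSigmoidNew()(x),
#                           torch.clamp(0.2 * x + 0.5, 0.0, 1.0))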
| Vivianyzw/Dual.DBNet.pytorch | HardSigmoid | false | 1,180 | [
"Apache-2.0",
"MIT"
] | 0 | 19d823ed7c05076c087a3f7ad1127c71c1c0d692 | https://github.com/Vivianyzw/Dual.DBNet.pytorch/tree/19d823ed7c05076c087a3f7ad1127c71c1c0d692 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
x = self.slope * x + self.offset
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
class FullyConnectedLayer(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FullyConnectedLayer, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLP, self).__init__()
self.fc = FullyConnectedLayer(in_size, mid_size, dropout_r=
dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'mid_size': 4, 'out_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class FullyConnectedLayer(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FullyConnectedLayer, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLPNew(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLPNew, self).__init__()
self.fc = FullyConnectedLayer(in_size, mid_size, dropout_r=
dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, input_0):
primals_1 = self.fc.linear.weight
primals_2 = self.fc.linear.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
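# Equivalence sketch (requires CUDA; `MLP` is the eager module defined earlier
# in this row, and its parameter names line up with MLPNew's, so weights can
# be shared through the state dict):
#
#     ref = MLP(4, 4, 4).cuda()
#     new = MLPNew(4, 4, 4).cuda()
#     new.load_state_dict(ref.state_dict())
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     assert torch.allclose(new(x), ref(x), atol=1e-5)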
| VISLANG-Lab/MGCL | MLP | false | 1,181 | [
"MIT"
] | 0 | 22da06ffa7410d9632bfda8eefb1b79e4f660de0 | https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0 | import torch
import torch.nn as nn
from torch.autograd import *
class FullyConnectedLayer(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super().__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class Model(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super().__init__()
self.fc = FullyConnectedLayer(in_size, mid_size, dropout_r=
dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/rv/crvzm2ku4tkenvfche2zhvobpilj6rk4dod4yym57qafqee4dbjv.py
# Topologically Sorted Source Nodes: [mul, mul_1, intersection, mul_4, mul_2, sum_2, mul_3, sum_3, add, union, truediv, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# intersection => sum_1
# loss => sub
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# union => add_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp0 * tmp3
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp1 * tmp3
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 2.0
tmp17 = tmp7 * tmp16
tmp18 = tmp11 + tmp15
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = tmp17 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, intersection, mul_4, mul_2, sum_2, mul_3, sum_3, add, union, truediv, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class DiceLoss(nn.Module):
"""
Loss function from https://arxiv.org/abs/1707.03237,
where the IoU computation is introduced in a heatmap manner to measure
the diversity between two heatmaps.
"""
def __init__(self, eps=1e-06):
super(DiceLoss, self).__init__()
self.eps = eps
def forward(self, pred: 'torch.Tensor', gt, mask, weights=None):
"""
pred: one or two heatmaps of shape (N, 1, H, W),
the losses of two heatmaps are added together.
gt: (N, 1, H, W)
mask: (N, H, W)
"""
return self._compute(pred, gt, mask, weights)
def _compute(self, pred, gt, mask, weights):
if pred.dim() == 4:
pred = pred[:, 0, :, :]
gt = gt[:, 0, :, :]
assert pred.shape == gt.shape
assert pred.shape == mask.shape
if weights is not None:
assert weights.shape == mask.shape
mask = weights * mask
intersection = (pred * gt * mask).sum()
union = (pred * mask).sum() + (gt * mask).sum() + self.eps
loss = 1 - 2.0 * intersection / union
assert loss <= 1
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
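# Restating the formula above: with P = pred, G = gt, M = mask,
# loss = 1 - 2 * sum(P * G * M) / (sum(P * M) + sum(G * M) + eps).
# A quick numeric check (tensor names illustrative):
_p, _g, _m = get_inputs()
_inter = (_p * _g * _m).sum()
_union = (_p * _m).sum() + (_g * _m).sum() + 1e-06
assert torch.allclose(DiceLoss()(_p, _g, _m), 1 - 2.0 * _inter / _union)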
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp0 * tmp3
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp1 * tmp3
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 2.0
tmp17 = tmp7 * tmp16
tmp18 = tmp11 + tmp15
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = tmp17 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class DiceLossNew(nn.Module):
"""
Loss function from https://arxiv.org/abs/1707.03237,
where the IoU computation is introduced in a heatmap manner to measure
the diversity between two heatmaps.
"""
def __init__(self, eps=1e-06):
super(DiceLossNew, self).__init__()
self.eps = eps
def _compute(self, pred, gt, mask, weights):
if pred.dim() == 4:
pred = pred[:, 0, :, :]
gt = gt[:, 0, :, :]
assert pred.shape == gt.shape
assert pred.shape == mask.shape
if weights is not None:
assert weights.shape == mask.shape
mask = weights * mask
intersection = (pred * gt * mask).sum()
union = (pred * mask).sum() + (gt * mask).sum() + self.eps
loss = 1 - 2.0 * intersection / union
assert loss <= 1
return loss
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
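# Equivalence sketch (requires CUDA; `DiceLoss` refers to the eager module
# defined earlier in this row, and the inputs must match the (4, 4) shapes
# asserted by `call`):
#
#     p = torch.rand(4, 4, device='cuda')
#     g = torch.rand(4, 4, device='cuda')
#     m = torch.rand(4, 4, device='cuda')
#     assert torch.allclose(DiceLossNew()(p, g, m), DiceLoss()(p, g, m))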
| Vivianyzw/Dual.DBNet.pytorch | DiceLoss | false | 1,182 | [
"Apache-2.0",
"MIT"
] | 0 | 19d823ed7c05076c087a3f7ad1127c71c1c0d692 | https://github.com/Vivianyzw/Dual.DBNet.pytorch/tree/19d823ed7c05076c087a3f7ad1127c71c1c0d692 | import torch
from torch import nn
class Model(nn.Module):
"""
Loss function from https://arxiv.org/abs/1707.03237,
where the IoU computation is introduced in a heatmap manner to measure
the diversity between two heatmaps.
"""
def __init__(self, eps=1e-06):
super().__init__()
self.eps = eps
def forward(self, pred: 'torch.Tensor', gt, mask, weights=None):
"""
pred: one or two heatmaps of shape (N, 1, H, W),
the losses of two heatmaps are added together.
gt: (N, 1, H, W)
mask: (N, H, W)
"""
return self._compute(pred, gt, mask, weights)
def _compute(self, pred, gt, mask, weights):
if pred.dim() == 4:
pred = pred[:, 0, :, :]
gt = gt[:, 0, :, :]
assert pred.shape == gt.shape
assert pred.shape == mask.shape
if weights is not None:
assert weights.shape == mask.shape
mask = weights * mask
intersection = (pred * gt * mask).sum()
union = (pred * mask).sum() + (gt * mask).sum() + self.eps
loss = 1 - 2.0 * intersection / union
assert loss <= 1
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
AFMLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/oj/coji63cmjptmfiahnhfxrcymtijnwomdesxsksu5cd5o6hnjtmkc.py
# Topologically Sorted Source Nodes: [p, q, inner_product], Original ATen: [aten.cat, aten.mul]
# Source node to ATen node mapping:
# inner_product => mul
# p => cat
# q => cat_1
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select, %select, %select, %select_1, %select_1, %select_2], 1), kwargs = {})
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_1, %select_2, %select_3, %select_2, %select_3, %select_3], 1), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %cat_1), kwargs = {})
triton_poi_fused_cat_mul_0 = async_compile.triton('triton_poi_fused_cat_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 24
x0 = xindex % 4
x2 = (xindex // 96)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (64 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (64 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr0 + (128 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tmp35 = tl.load(in_ptr0 + (64 + x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp36 = tl.load(in_ptr0 + (128 + x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
tmp37 = tl.load(in_ptr0 + (192 + x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
tmp38 = tl.load(in_ptr0 + (128 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
tmp39 = tl.load(in_ptr0 + (192 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
tmp40 = tl.load(in_ptr0 + (192 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
tmp41 = tl.where(tmp24, tmp39, tmp40)
tmp42 = tl.where(tmp19, tmp38, tmp41)
tmp43 = tl.where(tmp14, tmp37, tmp42)
tmp44 = tl.where(tmp9, tmp36, tmp43)
tmp45 = tl.where(tmp4, tmp35, tmp44)
tmp46 = tmp34 * tmp45
tl.store(in_out_ptr0 + (x3), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/j2/cj26ownu73m72kwjlseu3qfwtrz4f3ru464aa4zuhodtujlnjupm.py
# Topologically Sorted Source Nodes: [add, attention_temp], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# attention_temp => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/p4/cp4mdcdve4y73ad5mzhckzksofhes3a2n2zye5hynnmbc62ct27d.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (24*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (24*x0)), tmp11, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ui/cui4pbynpryqmgmjhsdzeompa6sltxsmy5ggopxiaqdlvyafsjpl.py
# Topologically Sorted Source Nodes: [mul_1, attention_output], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_output => sum_2
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %mul), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_per_fused_mul_sum_3 = async_compile.triton('triton_per_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x1 = (xindex // 4)
x0 = xindex % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (24*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (96*x1)), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [p, q, inner_product], Original ATen: [aten.cat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_mul_0.run(buf2, primals_1, 384, grid=grid(384), stream=stream0)
del primals_1
buf3 = empty_strided_cuda((96, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensordot], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (96, 4), (4, 1), 0), primals_2, out=buf3)
del primals_2
buf4 = reinterpret_tensor(buf3, (4, 24, 4), (96, 4, 1), 0); del buf3 # reuse
buf11 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, attention_temp], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf4, primals_3, buf11, 384, grid=grid(384), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((96, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensordot_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (96, 4), (4, 1), 0), primals_4, out=buf5)
buf8 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf5, buf8, 4, 24, grid=grid(4), stream=stream0)
del buf5
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, attention_output], Original ATen: [aten.mul, aten.sum]
triton_per_fused_mul_sum_3.run(buf8, buf2, buf9, 16, 24, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [afm_out], Original ATen: [aten.mm]
extern_kernels.mm(buf9, primals_5, out=buf10)
return (buf10, buf8, buf2, buf8, reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), reinterpret_tensor(buf4, (4, 96), (1, 4), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class AFMLayer(nn.Module):
"""Attentonal Factorization Machine models pairwise (order-2) feature
interactions without linear term and bias.
Input shape
- A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **attention_factor** : Positive integer, dimensionality of the
attention network output space.
- **l2_reg_w** : float between 0 and 1. L2 regularizer strength
applied to attention network.
        - **dropout_rate** : float in [0,1). Fraction of the attention net output units to drop out.
- **seed** : A Python integer to use as random seed.
References
- [Attentional Factorization Machines : Learning the Weight of Feature
Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
"""
def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
dropout_rate=0, seed=1024, device='cpu'):
super(AFMLayer, self).__init__()
self.attention_factor = attention_factor
self.l2_reg_w = l2_reg_w
self.dropout_rate = dropout_rate
self.seed = seed
embedding_size = in_features
self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.
attention_factor))
self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1)
)
self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
for tensor in [self.attention_W, self.projection_h, self.projection_p]:
nn.init.xavier_normal_(tensor)
for tensor in [self.attention_b]:
nn.init.zeros_(tensor)
self.dropout = nn.Dropout(dropout_rate)
        self.to(device)
def forward(self, inputs):
embeds_vec_list = inputs
row = []
col = []
for r, c in itertools.combinations(embeds_vec_list, 2):
row.append(r)
col.append(c)
p = torch.cat(row, dim=1)
q = torch.cat(col, dim=1)
inner_product = p * q
bi_interaction = inner_product
attention_temp = F.relu(torch.tensordot(bi_interaction, self.
attention_W, dims=([-1], [0])) + self.attention_b)
self.normalized_att_score = F.softmax(torch.tensordot(
attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
attention_output = torch.sum(self.normalized_att_score *
bi_interaction, dim=1)
attention_output = self.dropout(attention_output)
afm_out = torch.tensordot(attention_output, self.projection_p, dims
=([-1], [0]))
return afm_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
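# A minimal usage sketch of AFMLayer, assuming the documented input contract
# (a list of (batch_size, 1, embedding_size) tensors, one per feature field);
# the helper name below is illustrative only.
def afm_usage_sketch(batch_size=4, embedding_size=4, num_fields=3):
    layer = AFMLayer(in_features=embedding_size, attention_factor=4)
    embeds = [torch.rand(batch_size, 1, embedding_size) for _ in range(num_fields)]
    out = layer(embeds)  # C(num_fields, 2) pairwise products -> attention -> projection
    assert out.shape == (batch_size, 1)
    return out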
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24
x0 = xindex % 4
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask,
other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tl.full([1], 24, tl.int64)
tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tmp35 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask,
other=0.0)
tmp36 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 &
xmask, other=0.0)
tmp37 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 &
xmask, other=0.0)
tmp38 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp39 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp40 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp41 = tl.where(tmp24, tmp39, tmp40)
tmp42 = tl.where(tmp19, tmp38, tmp41)
tmp43 = tl.where(tmp14, tmp37, tmp42)
tmp44 = tl.where(tmp9, tmp36, tmp43)
tmp45 = tl.where(tmp4, tmp35, tmp44)
tmp46 = tmp34 * tmp45
tl.store(in_out_ptr0 + x3, tmp46, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 24 * x0), tmp11, rmask & xmask)
@triton.jit
def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x1 = xindex // 4
x0 = xindex % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 24 * x1), rmask & xmask, eviction_policy
='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 96 * x1), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x3, tmp6, xmask)
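# triton_per_fused_mul_sum_3 above reduces over the 24 pairwise-interaction
# rows: for each (batch, embedding-dim) pair it accumulates
#   out[b, d] = sum_r att[b, r] * bi_interaction[b, r, d],
# matching `torch.sum(self.normalized_att_score * bi_interaction, dim=1)`
# in the eager forward pass.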
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_cat_mul_0[grid(384)](buf2, primals_1, 384, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((96, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (96, 4), (4, 1), 0),
primals_2, out=buf3)
del primals_2
buf4 = reinterpret_tensor(buf3, (4, 24, 4), (96, 4, 1), 0)
del buf3
buf11 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(384)](buf4,
primals_3, buf11, 384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((96, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (96, 4), (4, 1), 0),
primals_4, out=buf5)
buf8 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
triton_per_fused__softmax_2[grid(4)](buf5, buf8, 4, 24, XBLOCK=1,
num_warps=2, num_stages=1)
del buf5
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_mul_sum_3[grid(16)](buf8, buf2, buf9, 16, 24,
XBLOCK=1, num_warps=2, num_stages=1)
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, primals_5, out=buf10)
return buf10, buf8, buf2, buf8, reinterpret_tensor(buf9, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0
), reinterpret_tensor(buf4, (4, 96), (1, 4), 0), reinterpret_tensor(
primals_4, (1, 4), (1, 1), 0), buf11
class AFMLayerNew(nn.Module):
"""Attentonal Factorization Machine models pairwise (order-2) feature
interactions without linear term and bias.
Input shape
- A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **attention_factor** : Positive integer, dimensionality of the
attention network output space.
- **l2_reg_w** : float between 0 and 1. L2 regularizer strength
applied to attention network.
        - **dropout_rate** : float in [0,1). Fraction of the attention net output units to drop out.
- **seed** : A Python integer to use as random seed.
References
- [Attentional Factorization Machines : Learning the Weight of Feature
Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
"""
def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
dropout_rate=0, seed=1024, device='cpu'):
super(AFMLayerNew, self).__init__()
self.attention_factor = attention_factor
self.l2_reg_w = l2_reg_w
self.dropout_rate = dropout_rate
self.seed = seed
embedding_size = in_features
self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.
attention_factor))
self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1)
)
self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
for tensor in [self.attention_W, self.projection_h, self.projection_p]:
nn.init.xavier_normal_(tensor)
for tensor in [self.attention_b]:
nn.init.zeros_(tensor)
self.dropout = nn.Dropout(dropout_rate)
        self.to(device)
def forward(self, input_0):
primals_2 = self.attention_W
primals_3 = self.attention_b
primals_4 = self.projection_h
primals_5 = self.projection_p
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
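# Note: call() returns afm_out first, followed by tensors saved for the
# backward pass; the compiled module's forward surfaces only the first element.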
| Sunmyunghan/Final_Project | AFMLayer | false | 1,183 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
"""Attentonal Factorization Machine models pairwise (order-2) feature
interactions without linear term and bias.
Input shape
- A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **attention_factor** : Positive integer, dimensionality of the
attention network output space.
- **l2_reg_w** : float between 0 and 1. L2 regularizer strength
applied to attention network.
- **dropout_rate** : float between in [0,1). Fraction of the attention net output units to dropout.
- **seed** : A Python integer to use as random seed.
References
- [Attentional Factorization Machines : Learning the Weight of Feature
Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
"""
def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
dropout_rate=0, seed=1024, device='cpu'):
super().__init__()
self.attention_factor = attention_factor
self.l2_reg_w = l2_reg_w
self.dropout_rate = dropout_rate
self.seed = seed
embedding_size = in_features
self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.
attention_factor))
self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1)
)
self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
for tensor in [self.attention_W, self.projection_h, self.projection_p]:
nn.init.xavier_normal_(tensor)
for tensor in [self.attention_b]:
nn.init.zeros_(tensor)
self.dropout = nn.Dropout(dropout_rate)
        self.to(device)
def forward(self, inputs):
embeds_vec_list = inputs
row = []
col = []
for r, c in itertools.combinations(embeds_vec_list, 2):
row.append(r)
col.append(c)
p = torch.cat(row, dim=1)
q = torch.cat(col, dim=1)
inner_product = p * q
bi_interaction = inner_product
attention_temp = F.relu(torch.tensordot(bi_interaction, self.
attention_W, dims=([-1], [0])) + self.attention_b)
self.normalized_att_score = F.softmax(torch.tensordot(
attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
attention_output = torch.sum(self.normalized_att_score *
bi_interaction, dim=1)
attention_output = self.dropout(attention_output)
afm_out = torch.tensordot(attention_output, self.projection_p, dims
=([-1], [0]))
return afm_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/z5/cz5bgdo2gmhnnmtf6w7lrjkvliacxo7nomq7mbmjquxqyxqgt5bj.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rm/crm4achd57rtzs377vwsvkyz5fhhtqzcbaqyzscjlfp5duwmbj2e.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (1, 2), (2, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (2, 1), (1, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf8, 128, grid=grid(128), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 1), (16, 4, 4, 4, 1, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 1, 1, 4, 1), (16, 4, 4, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1), (1, 0), 0), reinterpret_tensor(primals_6, (1, 2), (1, 1), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1), 0); del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf6, 128, grid=grid(128), stream=stream0)
del primals_7
return (reinterpret_tensor(buf5, (4, 32), (32, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(buf3, (64, 1), (1, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Discriminator(nn.Module):
def __init__(self, outputs_size, K=2):
super(Discriminator, self).__init__()
self.fc1 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc2 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc3 = nn.Linear(outputs_size, 2, bias=True)
def forward(self, x):
x = x[:, :, None, None]
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
out = F.relu(self.fc3(out))
out = out.view(out.size(0), -1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'outputs_size': 4}]
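# A minimal sketch of how this module is exercised by get_inputs() above:
# [:, :, None, None] inserts two singleton dims, each nn.Linear acts on the
# last dimension (4 -> 2 -> 1 -> 2), and the final view flattens everything
# after the batch dimension. The helper name is illustrative only.
def discriminator_usage_sketch():
    d = Discriminator(outputs_size=4, K=2)
    out = d(torch.rand(4, 4, 4, 4))  # (4, 4, 1, 1, 4, 2) flattened to (4, 32)
    assert out.shape == (4, 32)
    return out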
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
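# Both kernels above fuse a Linear layer's bias add and ReLU with the
# bookkeeping for the backward pass: they write the activation
# max(0, x + b) back in place and store a boolean mask (activation <= 0)
# that threshold_backward later uses to zero gradients where ReLU was
# inactive.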
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (1, 2), (2, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (2, 1), (1, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2,
1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
primals_3, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0),
reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 1), (16, 4, 4, 4, 1,
1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 1, 1, 4, 1), (16, 4, 4, 4, 1, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf3,
primals_5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1), (1, 0), 0),
reinterpret_tensor(primals_6, (1, 2), (1, 1), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2,
1), 0)
del buf4
buf6 = empty_strided_cuda((4, 4, 1, 1, 4, 2), (32, 8, 8, 8, 2, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128)](buf5,
primals_7, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return reinterpret_tensor(buf5, (4, 32), (32, 1), 0), reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2), (
2, 1), 0), reinterpret_tensor(buf3, (64, 1), (1, 1), 0
), buf6, primals_6, buf7, primals_4, buf8
class DiscriminatorNew(nn.Module):
def __init__(self, outputs_size, K=2):
super(DiscriminatorNew, self).__init__()
self.fc1 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc2 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc3 = nn.Linear(outputs_size, 2, bias=True)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Ulian7/DeepCTR | Discriminator | false | 1,184 | [
"Apache-2.0"
] | 0 | d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | https://github.com/Ulian7/DeepCTR/tree/d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
def __init__(self, outputs_size, K=2):
super().__init__()
self.fc1 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc2 = nn.Linear(outputs_size, outputs_size // K, bias=True)
outputs_size = outputs_size // K
self.fc3 = nn.Linear(outputs_size, 2, bias=True)
def forward(self, x):
x = x[:, :, None, None]
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
out = F.relu(self.fc3(out))
out = out.view(out.size(0), -1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
APL | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/kg/ckgosn72xsu5vxglrxam5q6utx6ugdbmjmklsfq42vhqoezat4d3.py
# Topologically Sorted Source Nodes: [output, neg, t, clamp_1, mul, output_1, t_1, clamp_2, mul_1, output_2, t_2, clamp_3, mul_2, output_3, t_3, clamp_4, mul_3, output_4], Original ATen: [aten.clamp, aten.neg, aten.add, aten.mul]
# Source node to ATen node mapping:
# clamp_1 => clamp_min_1
# clamp_2 => clamp_min_2
# clamp_3 => clamp_min_3
# clamp_4 => clamp_min_4
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# neg => neg
# output => clamp_min
# output_1 => add_1
# output_2 => add_3
# output_3 => add_5
# output_4 => add_7
# t => add
# t_1 => add_2
# t_2 => add_4
# t_3 => add_6
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_1, 0), kwargs = {})
# %neg : [num_users=4] = call_function[target=torch.ops.aten.neg.default](args = (%primals_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %select), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %clamp_min_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_min, %mul), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %select_2), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %clamp_min_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %select_4), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_4, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %clamp_min_3), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_2), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %select_6), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_6, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %clamp_min_4), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_3), kwargs = {})
triton_poi_fused_add_clamp_mul_neg_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_mul_neg_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = -tmp0
tmp6 = tmp4 + tmp5
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp3 * tmp7
tmp9 = tmp2 + tmp8
tmp12 = tmp4 + tmp11
tmp13 = triton_helpers.maximum(tmp12, tmp1)
tmp14 = tmp10 * tmp13
tmp15 = tmp9 + tmp14
tmp18 = tmp4 + tmp17
tmp19 = triton_helpers.maximum(tmp18, tmp1)
tmp20 = tmp16 * tmp19
tmp21 = tmp15 + tmp20
tmp24 = tmp4 + tmp23
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp22 * tmp25
tmp27 = tmp21 + tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, neg, t, clamp_1, mul, output_1, t_1, clamp_2, mul_1, output_2, t_2, clamp_3, mul_2, output_3, t_3, clamp_4, mul_3, output_4], Original ATen: [aten.clamp, aten.neg, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_mul_neg_0.run(primals_1, primals_3, primals_2, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn.parameter import Parameter
class APL(nn.Module):
"""
Implementation of APL (ADAPTIVE PIECEWISE LINEAR UNITS) unit:
.. math::
        APL(x_i) = max(0, x_i) + \\sum_{s=1}^{S}{a_i^s * max(0, -x_i + b_i^s)}
with trainable parameters a and b, parameter S should be set in advance.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- S: hyperparameter, number of hinges to be set in advance
        - a: trainable parameter, controls the slopes of the linear segments
        - b: trainable parameter, determines the locations of the hinges
References:
- See APL paper:
https://arxiv.org/pdf/1412.6830.pdf
Examples:
>>> a1 = apl(256, S = 1)
>>> x = torch.randn(256)
>>> x = a1(x)
"""
def __init__(self, in_features, S, a=None, b=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- S (int): number of hinges
- a - value for initialization of parameter, which controls the slopes of the linear segments
- b - value for initialization of parameter, which determines the locations of the hinges
a, b are initialized randomly by default
"""
super(APL, self).__init__()
self.in_features = in_features
self.S = S
if a is None:
self.a = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.a = a
if b is None:
self.b = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.b = b
def forward(self, x):
"""
Forward pass of the function
"""
output = x.clamp(min=0)
for s in range(self.S):
t = -x + self.b[s]
output += self.a[s] * t.clamp(min=0)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'S': 4}]
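# A small worked check of the APL formula, with illustrative values for the
# optional a/b arguments (plain tensors here rather than Parameters):
#   max(0, -2) + 0.5 * max(0, -(-2) + 1) = 0 + 0.5 * 3 = 1.5
def apl_worked_example():
    act = APL(in_features=1, S=1,
              a=torch.tensor([[0.5]]), b=torch.tensor([[1.0]]))
    y = act(torch.tensor([-2.0]))
    assert torch.allclose(y, torch.tensor([1.5]))
    return y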
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_mul_neg_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = -tmp0
tmp6 = tmp4 + tmp5
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp3 * tmp7
tmp9 = tmp2 + tmp8
tmp12 = tmp4 + tmp11
tmp13 = triton_helpers.maximum(tmp12, tmp1)
tmp14 = tmp10 * tmp13
tmp15 = tmp9 + tmp14
tmp18 = tmp4 + tmp17
tmp19 = triton_helpers.maximum(tmp18, tmp1)
tmp20 = tmp16 * tmp19
tmp21 = tmp15 + tmp20
tmp24 = tmp4 + tmp23
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp22 * tmp25
tmp27 = tmp21 + tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
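# The kernel above is APL.forward with the S=4 hinge loop fully unrolled:
# tmp2 holds max(0, x), each subsequent load/maximum/mul/add group handles
# one hinge s, and tmp27 accumulates
#   max(0, x) + sum_{s=0..3} a[s] * max(0, -x + b[s])
# in a single pass, with the per-hinge parameters broadcast along the last
# input dimension (x0 = xindex % 4).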
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_neg_0[grid(256)](primals_1,
primals_3, primals_2, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3
class APLNew(nn.Module):
"""
Implementation of APL (ADAPTIVE PIECEWISE LINEAR UNITS) unit:
.. math::
        APL(x_i) = max(0, x_i) + \\sum_{s=1}^{S}{a_i^s * max(0, -x_i + b_i^s)}
with trainable parameters a and b, parameter S should be set in advance.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- S: hyperparameter, number of hinges to be set in advance
        - a: trainable parameter, controls the slopes of the linear segments
        - b: trainable parameter, determines the locations of the hinges
References:
- See APL paper:
https://arxiv.org/pdf/1412.6830.pdf
Examples:
>>> a1 = apl(256, S = 1)
>>> x = torch.randn(256)
>>> x = a1(x)
"""
def __init__(self, in_features, S, a=None, b=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- S (int): number of hinges
- a - value for initialization of parameter, which controls the slopes of the linear segments
- b - value for initialization of parameter, which determines the locations of the hinges
a, b are initialized randomly by default
"""
super(APLNew, self).__init__()
self.in_features = in_features
self.S = S
if a is None:
self.a = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.a = a
if b is None:
self.b = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.b = b
def forward(self, input_0):
primals_2 = self.a
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Venkateshwar2506/Echo | APL | false | 1,185 | [
"MIT"
] | 0 | 5d236b25ee4900754f48e0a865e1bf1ae9183875 | https://github.com/Venkateshwar2506/Echo/tree/5d236b25ee4900754f48e0a865e1bf1ae9183875 | import torch
from torch import nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
"""
Implementation of APL (ADAPTIVE PIECEWISE LINEAR UNITS) unit:
.. math::
        APL(x_i) = max(0, x_i) + \\sum_{s=1}^{S}{a_i^s * max(0, -x_i + b_i^s)}
with trainable parameters a and b, parameter S should be set in advance.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- S: hyperparameter, number of hinges to be set in advance
        - a: trainable parameter, controls the slopes of the linear segments
        - b: trainable parameter, determines the locations of the hinges
References:
- See APL paper:
https://arxiv.org/pdf/1412.6830.pdf
Examples:
>>> a1 = apl(256, S = 1)
>>> x = torch.randn(256)
>>> x = a1(x)
"""
def __init__(self, in_features, S, a=None, b=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- S (int): number of hinges
- a - value for initialization of parameter, which controls the slopes of the linear segments
- b - value for initialization of parameter, which determines the locations of the hinges
a, b are initialized randomly by default
"""
super().__init__()
self.in_features = in_features
self.S = S
if a is None:
self.a = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.a = a
if b is None:
self.b = Parameter(torch.randn((S, in_features), dtype=torch.
float, requires_grad=True))
else:
self.b = b
def forward(self, x):
"""
Forward pass of the function
"""
output = x.clamp(min=0)
for s in range(self.S):
t = -x + self.b[s]
output += self.a[s] * t.clamp(min=0)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ClipLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/up/cuphmudfbm5xmuymbvibhn5q6e5y55prj6knqkmj43mx7ovcvcgz.py
# Topologically Sorted Source Nodes: [labels], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# labels => iota
# Graph fragment:
# %iota : [num_users=9] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_0 = async_compile.triton('triton_poi_fused_arange_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/fe/cfeaysvgrb354rjrmwuwoqxvkiirbz7k33fi7kmoxrhyavv746t7.py
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp4, xmask)
''', device_str='cuda')
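# The kernel above shares one load of in_ptr0 (the logit-scale operand) to
# compute both elementwise products mul and mul_1 in a single pass; these
# become the operands of the two similarity matmuls that follow.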
# kernel path: runs/run_shard_6/inductor_cache/gd/cgdq755g3clp3t5icrbudwx4ir4xygtoz6ug4jo2euegtyg5mdnp.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %amax), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
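# The kernel above is the numerically stable first step of log_softmax:
# for each row of the 4x4 logit matrix it subtracts the row maximum, so the
# exp/sum reduction in the following NLL kernel cannot overflow.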
# kernel path: runs/run_shard_6/inductor_cache/ao/caokdwufonglm4yxauurmohi3zlbq5arj7el5e3pvrhppoklt3gs.py
# Topologically Sorted Source Nodes: [cross_entropy, cross_entropy_1, add, total_loss], Original ATen: [aten.nll_loss_forward, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# cross_entropy => convert_element_type, div, full_default_1, ne_1, ne_2, neg, sum_2, sum_3, where_1
# cross_entropy_1 => convert_element_type_1, div_1, full_default_3, ne_4, ne_5, neg_1, sum_5, sum_6, where_3
# total_loss => div_2
# Graph fragment:
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {})
# %ne_4 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze_1,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_4, %neg_1, %full_default_3), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_3,), kwargs = {})
# %ne_5 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_5,), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_5, torch.float32), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %convert_element_type_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
triton_per_fused_add_div_nll_loss_forward_3 = async_compile.triton('triton_per_fused_add_div_nll_loss_forward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_nll_loss_forward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_nll_loss_forward_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp6 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp0 = r0
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.load(in_ptr0 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp5 - tmp17
tmp19 = -tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = -tmp38
tmp40 = tl.where(tmp2, tmp39, tmp20)
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = tmp2.to(tl.int64)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.sum(tmp45, 1)[:, None]
tmp48 = tmp47.to(tl.float32)
tmp49 = tmp24 / tmp48
tmp50 = tmp43 / tmp48
tmp51 = tmp49 + tmp50
tmp52 = 0.5
tmp53 = tmp51 * tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [labels], Original ATen: [aten.arange]
stream0 = get_raw_stream(0)
triton_poi_fused_arange_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(arg1_1, arg0_1, arg2_1, buf1, buf6, 16, grid=grid(16), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, logits_per_image], Original ATen: [aten.mul, aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(arg2_1, (4, 4), (1, 4), 0), out=buf2)
del arg2_1
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf7 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_1, logits_per_text], Original ATen: [aten.mul, aten.mm]
extern_kernels.mm(buf6, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf7)
del arg0_1
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [cross_entropy_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf7, buf8, 16, grid=grid(16), stream=stream0)
del buf7
buf4 = empty_strided_cuda((), (), torch.float32)
buf11 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, cross_entropy_1, add, total_loss], Original ATen: [aten.nll_loss_forward, aten.add, aten.div]
triton_per_fused_add_div_nll_loss_forward_3.run(buf11, buf3, buf8, 1, 4, grid=grid(1), stream=stream0)
del buf3
del buf8
return (buf11, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
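# Hedged eager-mode reference (an added sketch, not part of the Inductor
# output above): arg1_1 is the shared factor in both fused products of
# triton_poi_fused_mul_1, so it plays the role of logit_scale, while arg0_1
# and arg2_1 are the image/text feature matrices fed to the extern mm calls.
def _eager_clip_loss(image_features, text_features, logit_scale):
    import torch
    import torch.nn.functional as F
    logits_per_image = (logit_scale * image_features) @ text_features.T
    logits_per_text = (logit_scale * text_features) @ image_features.T
    labels = torch.arange(logits_per_image.shape[0], device=image_features.device)
    loss_i = F.cross_entropy(logits_per_image, labels)
    loss_t = F.cross_entropy(logits_per_text, labels)
    return (loss_i + loss_t) / 2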
| import torch
import torch.nn.functional as F
from torch import nn
import torch.distributed as dist
import torch.distributed.nn
try:
    import horovod.torch as hvd  # referenced below when horovod=True
except ImportError:
    hvd = None  # assumed guard; the import is absent from this extract
def gather_features(image_features, text_features, aug1_embed=None,
aug2_embed=None, local_loss=False, gather_with_grad=False, rank=0,
world_size=1, horovod=False):
if horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
if not local_loss:
gathered_image_features = list(all_image_features.chunk(
world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(
world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = list(all_aug1_embed.chunk(
world_size, dim=0))
gathered_aug2_embed = list(all_aug2_embed.chunk(
world_size, dim=0))
gathered_aug1_embed[rank] = aug1_embed
gathered_aug2_embed[rank] = aug2_embed
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
elif gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(
image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(
text_features), dim=0)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = torch.cat(torch.distributed.nn.all_gather(
aug1_embed), dim=0)
all_aug2_embed = torch.cat(torch.distributed.nn.all_gather(
aug2_embed), dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in
range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in
range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = [torch.zeros_like(aug1_embed) for _ in
range(world_size)]
gathered_aug2_embed = [torch.zeros_like(aug2_embed) for _ in
range(world_size)]
dist.all_gather(gathered_aug1_embed, aug1_embed)
dist.all_gather(gathered_aug2_embed, aug2_embed)
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
if not local_loss:
all_aug1_embed[rank] = aug1_embed
all_aug2_embed[rank] = aug2_embed
else:
all_aug1_embed, all_aug2_embed = None, None
if not local_loss:
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return (all_image_features, all_text_features, all_aug1_embed,
all_aug2_embed)
class ClipLoss(nn.Module):
def __init__(self, local_loss=False, gather_with_grad=False, rank=0,
world_size=1, horovod=False):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.rank = rank
self.world_size = world_size
self.horovod = horovod
self.prev_num_logits = 0
self.labels = {}
def forward(self, image_features, text_features, logit_scale):
device = image_features.device
if self.world_size > 1:
all_image_features, all_text_features, _, _ = gather_features(
image_features, text_features, None, None, self.local_loss,
self.gather_with_grad, self.rank, self.world_size, self.horovod
)
if self.local_loss:
logits_per_image = (logit_scale * image_features @
all_text_features.T)
logits_per_text = (logit_scale * text_features @
all_image_features.T)
else:
logits_per_image = (logit_scale * all_image_features @
all_text_features.T)
logits_per_text = logits_per_image.T
else:
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T
num_logits = logits_per_image.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
self.labels[device] = labels
else:
labels = self.labels[device]
total_loss = (F.cross_entropy(logits_per_image, labels) + F.
cross_entropy(logits_per_text, labels)) / 2
return total_loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
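# Hedged usage sketch (hypothetical values; note that get_inputs() above
# passes a (4, 4) tensor for logit_scale, although CLIP normally uses a
# learned scalar):
if __name__ == "__main__":
    image_features = torch.rand(4, 4)
    text_features = torch.rand(4, 4)
    logit_scale = torch.rand(4, 4)
    loss = ClipLoss()(image_features, text_features, logit_scale)
    print(float(loss))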
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.distributed as dist
import torch.distributed.nn
try:
    import horovod.torch as hvd  # referenced below when horovod=True
except ImportError:
    hvd = None  # assumed guard; the import is absent from this extract
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
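# Hedged reference (an added sketch, not generated code): the kernel above
# only subtracts the per-row max so that the exp() in the follow-up
# reduction cannot overflow; the log-sum-exp itself happens later.
def _eager_shifted_logits(logits):
    return logits - logits.amax(dim=1, keepdim=True)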
@triton.jit
def triton_per_fused_add_div_nll_loss_forward_3(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp0 = r0
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp5 - tmp17
tmp19 = -tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = -tmp38
tmp40 = tl.where(tmp2, tmp39, tmp20)
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = tmp2.to(tl.int64)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.sum(tmp45, 1)[:, None]
tmp48 = tmp47.to(tl.float32)
tmp49 = tmp24 / tmp48
tmp50 = tmp43 / tmp48
tmp51 = tmp49 + tmp50
tmp52 = 0.5
tmp53 = tmp51 * tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
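# Hedged reference (an added sketch, not generated code): the persistent
# reduction above completes log_softmax via log-sum-exp, gathers the
# diagonal entries (labels == arange(4), never equal to ignore_index -100),
# and averages the two directional NLL means, i.e. multiplies their sum by
# 0.5. An eager equivalent for the two (4, 4) shifted-logit buffers:
def _eager_symmetric_nll(shifted_img, shifted_txt):
    logp_img = shifted_img - shifted_img.exp().sum(dim=1, keepdim=True).log()
    logp_txt = shifted_txt - shifted_txt.exp().sum(dim=1, keepdim=True).log()
    labels = torch.arange(shifted_img.shape[0], device=shifted_img.device)
    nll_img = -logp_img[labels, labels].mean()
    nll_txt = -logp_txt[labels, labels].mean()
    return (nll_img + nll_txt) / 2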
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_1[grid(16)](arg1_1, arg0_1, arg2_1, buf1, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(arg2_1, (4, 4), (1, 4),
0), out=buf2)
del arg2_1
buf3 = buf1
del buf1
triton_poi_fused__log_softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf2
del buf2
extern_kernels.mm(buf6, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf7)
del arg0_1
buf8 = buf6
del buf6
triton_poi_fused__log_softmax_2[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf7
buf4 = empty_strided_cuda((), (), torch.float32)
buf11 = buf4
del buf4
triton_per_fused_add_div_nll_loss_forward_3[grid(1)](buf11, buf3,
buf8, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf3
del buf8
return buf11, buf0
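# Note on the buffer reuse above (descriptive comment, added): buf1 and buf6
# hold the scaled operands, are consumed by the extern mm calls, and are then
# reused in place as the log-softmax outputs buf3 and buf8; buf2 is likewise
# reused as buf7 for the second matmul, so the whole loss needs only three
# (4, 4) scratch buffers plus the scalar output buf11 and the labels buf0.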
def gather_features(image_features, text_features, aug1_embed=None,
aug2_embed=None, local_loss=False, gather_with_grad=False, rank=0,
world_size=1, horovod=False):
if horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
if not local_loss:
gathered_image_features = list(all_image_features.chunk(
world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(
world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = list(all_aug1_embed.chunk(
world_size, dim=0))
gathered_aug2_embed = list(all_aug2_embed.chunk(
world_size, dim=0))
gathered_aug1_embed[rank] = aug1_embed
gathered_aug2_embed[rank] = aug2_embed
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
elif gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(
image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(
text_features), dim=0)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = torch.cat(torch.distributed.nn.all_gather(
aug1_embed), dim=0)
all_aug2_embed = torch.cat(torch.distributed.nn.all_gather(
aug2_embed), dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in
range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in
range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = [torch.zeros_like(aug1_embed) for _ in
range(world_size)]
gathered_aug2_embed = [torch.zeros_like(aug2_embed) for _ in
range(world_size)]
dist.all_gather(gathered_aug1_embed, aug1_embed)
dist.all_gather(gathered_aug2_embed, aug2_embed)
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
if not local_loss:
all_aug1_embed[rank] = aug1_embed
all_aug2_embed[rank] = aug2_embed
else:
all_aug1_embed, all_aug2_embed = None, None
if not local_loss:
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return (all_image_features, all_text_features, all_aug1_embed,
all_aug2_embed)
class ClipLossNew(nn.Module):
def __init__(self, local_loss=False, gather_with_grad=False, rank=0,
world_size=1, horovod=False):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.rank = rank
self.world_size = world_size
self.horovod = horovod
self.prev_num_logits = 0
self.labels = {}
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
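# Hedged usage sketch (hypothetical; mirrors get_inputs() of the eager
# module, with all tensors on CUDA as call() requires):
if __name__ == "__main__":
    module = ClipLossNew()
    a = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, 4, device='cuda')
    c = torch.rand(4, 4, device='cuda')
    print(float(module(a, b, c)))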
| Vaishaal/open_clip | ClipLoss | false | 1,186 | [
"MIT"
] | 0 | 8877c4036dacde022da90769c64006d9f2c82e84 | https://github.com/Vaishaal/open_clip/tree/8877c4036dacde022da90769c64006d9f2c82e84 | import torch
import torch.nn.functional as F
from torch import nn
import torch.distributed as dist
import torch.distributed.nn
try:
    import horovod.torch as hvd  # referenced below when horovod=True
except ImportError:
    hvd = None  # assumed guard; the import is absent from this extract
def gather_features(image_features, text_features, aug1_embed=None,
aug2_embed=None, local_loss=False, gather_with_grad=False, rank=0,
world_size=1, horovod=False):
if horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = hvd.allgather(aug1_embed)
all_aug2_embed = hvd.allgather(aug2_embed)
else:
all_aug1_embed, all_aug2_embed = None, None
if not local_loss:
gathered_image_features = list(all_image_features.chunk(
world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(
world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = list(all_aug1_embed.chunk(
world_size, dim=0))
gathered_aug2_embed = list(all_aug2_embed.chunk(
world_size, dim=0))
gathered_aug1_embed[rank] = aug1_embed
gathered_aug2_embed[rank] = aug2_embed
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
elif gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(
image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(
text_features), dim=0)
if aug1_embed is not None and aug2_embed is not None:
all_aug1_embed = torch.cat(torch.distributed.nn.all_gather(
aug1_embed), dim=0)
all_aug2_embed = torch.cat(torch.distributed.nn.all_gather(
aug2_embed), dim=0)
else:
all_aug1_embed, all_aug2_embed = None, None
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in
range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in
range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if aug1_embed is not None and aug2_embed is not None:
gathered_aug1_embed = [torch.zeros_like(aug1_embed) for _ in
range(world_size)]
gathered_aug2_embed = [torch.zeros_like(aug2_embed) for _ in
range(world_size)]
dist.all_gather(gathered_aug1_embed, aug1_embed)
dist.all_gather(gathered_aug2_embed, aug2_embed)
all_aug1_embed = torch.cat(gathered_aug1_embed, dim=0)
all_aug2_embed = torch.cat(gathered_aug2_embed, dim=0)
if not local_loss:
all_aug1_embed[rank] = aug1_embed
# ... truncated (>4000 chars) for memory efficiency |
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/el/celk5adxq2zxhymud4napv6zuj52qpmoxz7wanhqayimjnku6r23.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %convolution_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x0 = xindex % 4096
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 32, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-16) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-16) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xq/cxqtpoguh5q4lo7aqfwpnku6psmnd6vobmblp2f7t5p2vhk4zkez.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %convolution_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 48
x0 = xindex % 4096
x2 = (xindex // 196608)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 48, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-32) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-32) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sj/csjg2l7n7qdes27kdgvihemqiptcame7dsxapgvl7acr5yl7mnct.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_3 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %convolution_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 48, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (196608*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 64, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-48) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-48) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf2, primals_5, buf3, 524288, grid=grid(524288), stream=stream0)
del buf2
del primals_5
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, buf4, primals_7, buf5, 786432, grid=grid(786432), stream=stream0)
del buf4
del primals_7
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf5, buf6, primals_9, buf7, 1048576, grid=grid(1048576), stream=stream0)
del buf6
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
self.layers = nn.ModuleDict({'DenseConv1': nn.Conv2d(16, 16, 3, 1,
1), 'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1), 'DenseConv3': nn.
Conv2d(48, 16, 3, 1, 1)})
def forward(self, x):
x = self.Relu(self.Conv1(x))
for i in range(len(self.layers)):
out = self.layers['DenseConv' + str(i + 1)](x)
x = torch.cat([x, out], 1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
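# Hedged shape walk-through (added sketch): each DenseConv emits 16 channels
# that are concatenated onto its input, so channels grow 16 -> 32 -> 48 -> 64.
if __name__ == "__main__":
    encoder = Encoder()
    out = encoder(torch.rand(4, 1, 64, 64))
    assert out.shape == (4, 64, 64, 64)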
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
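# Hedged reference (an added sketch, not generated code): the kernel above
# fuses the Conv1 bias add with the in-place ReLU; x1 selects one of the 16
# output channels for the broadcast bias load. Eager equivalent:
def _eager_bias_relu(conv_out, bias):
    # conv_out: (N, 16, 64, 64) from the bias-free convolution, bias: (16,)
    return (conv_out + bias.view(1, -1, 1, 1)).relu()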
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 32, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-16 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-16 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 48
x0 = xindex % 4096
x2 = xindex // 196608
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 48, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-32 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 48, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 196608 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-48 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-48 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
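# Hedged reference (an added sketch, not generated code): each cat kernel
# above writes torch.cat([x, conv_out + bias], dim=1) in a single pass,
# folding the DenseConv bias add into the concatenation instead of
# materializing the biased activation first. Eager equivalent:
def _eager_cat_bias(x, conv_out, bias):
    return torch.cat([x, conv_out + bias.view(1, -1, 1, 1)], dim=1)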
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(524288)](buf1, buf2, primals_5, buf3,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf2
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(786432)](buf3, buf4, primals_7, buf5,
786432, XBLOCK=1024, num_warps=4, num_stages=1)
del buf4
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(1048576)](buf5, buf6, primals_9, buf7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del buf6
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class EncoderNew(nn.Module):
def __init__(self):
super(EncoderNew, self).__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
self.layers = nn.ModuleDict({'DenseConv1': nn.Conv2d(16, 16, 3, 1,
1), 'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1), 'DenseConv3': nn.
Conv2d(48, 16, 3, 1, 1)})
def forward(self, input_0):
primals_1 = self.Conv1.weight
primals_2 = self.Conv1.bias
primals_4 = self.layers.DenseConv1.weight
primals_5 = self.layers.DenseConv1.bias
primals_6 = self.layers.DenseConv2.weight
primals_7 = self.layers.DenseConv2.bias
primals_8 = self.layers.DenseConv3.weight
primals_9 = self.layers.DenseConv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| VarunBabbar/Image_Compressor | Encoder | false | 1,187 | [
"MIT"
] | 0 | 254d8d411f7cd16f3ce242275532c9fca537269c | https://github.com/VarunBabbar/Image_Compressor/tree/254d8d411f7cd16f3ce242275532c9fca537269c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
self.layers = nn.ModuleDict({'DenseConv1': nn.Conv2d(16, 16, 3, 1,
1), 'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1), 'DenseConv3': nn.
Conv2d(48, 16, 3, 1, 1)})
def forward(self, x):
x = self.Relu(self.Conv1(x))
for i in range(len(self.layers)):
out = self.layers['DenseConv' + str(i + 1)](x)
x = torch.cat([x, out], 1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
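# Hedged sketch (added): the in_channels of each DenseConv (16, 32, 48)
# track the running concat width, which is why the loop can keep
# concatenating without shape errors.
if __name__ == "__main__":
    model = Model()
    print([model.layers['DenseConv%d' % (i + 1)].in_channels for i in range(3)])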
|
architecture | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/el/celk5adxq2zxhymud4napv6zuj52qpmoxz7wanhqayimjnku6r23.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %convolution_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x0 = xindex % 4096
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 32, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-16) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-16) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xq/cxqtpoguh5q4lo7aqfwpnku6psmnd6vobmblp2f7t5p2vhk4zkez.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %convolution_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 48
x0 = xindex % 4096
x2 = (xindex // 196608)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 48, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-32) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-32) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sj/csjg2l7n7qdes27kdgvihemqiptcame7dsxapgvl7acr5yl7mnct.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_3 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %convolution_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 48, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (196608*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 64, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-48) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-48) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yz/cyzylkdlkpucjaq3h4cacl42tq4z6bgxuiyx4ieis4qfq4hjhblf.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution_4
# input_2 => relu_1
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gy/cgybnbhivv6t3vqzcx7fdsc65yls2agg5sbpwn7byk6wweoeh5vd.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_5 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (16, ), (1, ))
assert_size_stride(primals_14, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf2, primals_5, buf3, 524288, grid=grid(524288), stream=stream0)
del buf2
del primals_5
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, buf4, primals_7, buf5, 786432, grid=grid(786432), stream=stream0)
del buf4
del primals_7
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf5, buf6, primals_9, buf7, 1048576, grid=grid(1048576), stream=stream0)
del buf6
del primals_9
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_11, 524288, grid=grid(524288), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf11, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf13, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.layers = nn.Sequential()
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, x):
return self.layers(x)
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
        self.layers = nn.ModuleDict({
            'DenseConv1': nn.Conv2d(16, 16, 3, 1, 1),
            'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1),
            'DenseConv3': nn.Conv2d(48, 16, 3, 1, 1)})
def forward(self, x):
x = self.Relu(self.Conv1(x))
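        # Dense connectivity: each DenseConv output (16 channels) is
        # concatenated onto the running feature map, growing it
        # 16 -> 32 -> 48 -> 64 channels.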
for i in range(len(self.layers)):
out = self.layers['DenseConv' + str(i + 1)](x)
x = torch.cat([x, out], 1)
return x
class architecture(nn.Module):
def __init__(self):
super(architecture, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, x):
return self.decoder(self.encoder(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
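# Usage sketch (illustrative, not part of the original source):
# model = architecture()
# out = model(get_inputs()[0])  # expected output shape: (4, 1, 64, 64)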
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
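# Fused epilogue for conv + bias + ReLU: the 3x3 convolution itself runs as an
# extern ATen kernel in call() below; this kernel adds the per-channel bias
# (x1 indexes the 16 channels) and applies max(0, x) in place.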
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
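# torch.cat along the channel dim, fused with the bias add of the second input:
# channels [0, 16) are copied from the running activation, channels [16, 32)
# are the new DenseConv output plus its bias. The cat_2 and cat_3 kernels below
# repeat this pattern at 48 and 64 output channels.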
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 32, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-16 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-16 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 48
x0 = xindex % 4096
x2 = xindex // 196608
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 48, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-32 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 48, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 196608 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-48 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-48 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
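# Same bias + ReLU epilogue as triton_poi_fused_convolution_relu_0, but for the
# decoder's 32-channel Conv3 output.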
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
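# Bias add for the final single-channel Conv5: the lone bias value is loaded
# once and broadcast across the block; no ReLU follows this layer.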
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
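# call() drives the whole forward pass: the 3x3 convolutions run as extern
# ATen kernels, while the Triton kernels above supply the fused pointwise
# epilogues and channel concatenations in between.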
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(524288)](buf1, buf2, primals_5, buf3,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf2
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 48, 64, 64), (196608, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(786432)](buf3, buf4, primals_7, buf5,
786432, XBLOCK=1024, num_warps=4, num_stages=1)
del buf4
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(1048576)](buf5, buf6, primals_9, buf7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del buf6
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(524288)](buf9, primals_11,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_0[grid(262144)](buf11, primals_13,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_5[grid(16384)](buf13, primals_15,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11
)
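# The first element is the network output; the remaining weights and
# intermediate activations are returned so autograd can reuse them in the
# backward pass.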
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.layers = nn.Sequential()
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, x):
return self.layers(x)
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
        self.layers = nn.ModuleDict({
            'DenseConv1': nn.Conv2d(16, 16, 3, 1, 1),
            'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1),
            'DenseConv3': nn.Conv2d(48, 16, 3, 1, 1)})
def forward(self, x):
x = self.Relu(self.Conv1(x))
for i in range(len(self.layers)):
out = self.layers['DenseConv' + str(i + 1)](x)
x = torch.cat([x, out], 1)
return x
class architectureNew(nn.Module):
def __init__(self):
super(architectureNew, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, input_0):
primals_1 = self.encoder.Conv1.weight
primals_2 = self.encoder.Conv1.bias
primals_4 = self.encoder.layers.DenseConv1.weight
primals_5 = self.encoder.layers.DenseConv1.bias
primals_6 = self.encoder.layers.DenseConv2.weight
primals_7 = self.encoder.layers.DenseConv2.bias
primals_8 = self.encoder.layers.DenseConv3.weight
primals_9 = self.encoder.layers.DenseConv3.bias
primals_10 = self.decoder.layers.Conv3.weight
primals_11 = self.decoder.layers.Conv3.bias
primals_12 = self.decoder.layers.Conv4.weight
primals_13 = self.decoder.layers.Conv4.bias
primals_14 = self.decoder.layers.Conv5.weight
primals_15 = self.decoder.layers.Conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| VarunBabbar/Image_Compressor | architecture | false | 1,188 | [
"MIT"
] | 0 | 254d8d411f7cd16f3ce242275532c9fca537269c | https://github.com/VarunBabbar/Image_Compressor/tree/254d8d411f7cd16f3ce242275532c9fca537269c | import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential()
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, x):
return self.layers(x)
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.Conv1 = nn.Conv2d(1, 16, 3, 1, 1)
self.Relu = nn.ReLU(inplace=True)
        self.layers = nn.ModuleDict({
            'DenseConv1': nn.Conv2d(16, 16, 3, 1, 1),
            'DenseConv2': nn.Conv2d(32, 16, 3, 1, 1),
            'DenseConv3': nn.Conv2d(48, 16, 3, 1, 1)})
def forward(self, x):
x = self.Relu(self.Conv1(x))
for i in range(len(self.layers)):
out = self.layers['DenseConv' + str(i + 1)](x)
x = torch.cat([x, out], 1)
return x
class Model(nn.Module):
def __init__(self):
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, x):
return self.decoder(self.encoder(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
SReLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ve/cvekibnibp2jzomh3kuv73u2liqkd3ap4mdsxspqzvopkso2ubyb.py
# Topologically Sorted Source Nodes: [ge, float_1, add, mul, add_1, mul_1, lt, float_2, gt, float_3, mul_2, mul_3, add_2, le, float_4, add_3, mul_4, add_4, mul_5, add_5], Original ATen: [aten.ge, aten._to_copy, aten.add, aten.mul, aten.lt, aten.gt, aten.le]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# float_1 => convert_element_type
# float_2 => convert_element_type_1
# float_3 => convert_element_type_2
# float_4 => convert_element_type_3
# ge => ge
# gt => gt
# le => le
# lt => lt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# Graph fragment:
# %ge : [num_users=2] = call_function[target=torch.ops.aten.ge.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ge, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %add_1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%primals_2, %primals_4), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {})
# %le : [num_users=2] = call_function[target=torch.ops.aten.le.Tensor](args = (%primals_2, %primals_4), kwargs = {})
# %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%le, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %add_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_3, %add_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_5), kwargs = {})
triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0 = async_compile.triton('triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*i1', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 >= tmp1
tmp4 = tmp0 <= tmp3
tmp5 = tmp2.to(tl.float32)
tmp7 = tmp0 + tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp1 + tmp8
tmp10 = tmp5 * tmp9
tmp11 = tmp0 < tmp1
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp0 > tmp3
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 * tmp14
tmp16 = tmp15 * tmp0
tmp17 = tmp10 + tmp16
tmp18 = tmp4.to(tl.float32)
tmp20 = tmp0 + tmp3
tmp21 = tmp19 * tmp20
tmp22 = tmp3 + tmp21
tmp23 = tmp18 * tmp22
tmp24 = tmp17 + tmp23
tl.store(out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr1 + (x2), tmp4, xmask)
tl.store(out_ptr2 + (x2), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ge, float_1, add, mul, add_1, mul_1, lt, float_2, gt, float_3, mul_2, mul_3, add_2, le, float_4, add_3, mul_4, add_4, mul_5, add_5], Original ATen: [aten.ge, aten._to_copy, aten.add, aten.mul, aten.lt, aten.gt, aten.le]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0.run(primals_2, primals_1, primals_4, primals_3, primals_5, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_2, primals_3, primals_4, primals_5, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn.parameter import Parameter
class SReLU(nn.Module):
"""
    SReLU (S-shaped Rectified Linear Activation Unit): a combination of three linear functions, which together perform a mapping R → R with the following formulation:
.. math::
h(x_i) = \\left\\{\\begin{matrix} t_i^r + a_i^r(x_i - t_i^r), x_i \\geq t_i^r \\\\ x_i, t_i^r > x_i > t_i^l\\\\ t_i^l + a_i^l(x_i - t_i^l), x_i \\leq t_i^l \\\\ \\end{matrix}\\right.
with 4 trainable parameters.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
.. math:: \\{t_i^r, a_i^r, t_i^l, a_i^l\\}
4 trainable parameters, which model an individual SReLU activation unit. The subscript i indicates that we allow SReLU to vary in different channels. Parameters can be initialized manually or randomly.
References:
- See SReLU paper:
https://arxiv.org/pdf/1512.07030.pdf
Examples:
        >>> srelu_activation = SReLU((2,2))
>>> t = torch.randn((2,2), dtype=torch.float, requires_grad = True)
>>> output = srelu_activation(t)
"""
def __init__(self, in_features, parameters=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- parameters: (tr, tl, ar, al) parameters for manual initialization, default value is None. If None is passed, parameters are initialized randomly.
"""
super(SReLU, self).__init__()
self.in_features = in_features
if parameters is None:
self.tr = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.tl = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.ar = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.al = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
else:
self.tr, self.tl, self.ar, self.al = parameters
def forward(self, x):
"""
Forward pass of the function
"""
return (x >= self.tr).float() * (self.tr + self.ar * (x + self.tr)) + (
x < self.tr).float() * (x > self.tl).float() * x + (x <= self.tl
).float() * (self.tl + self.al * (x + self.tl))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
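# One fused kernel evaluates the full three-piece SReLU expression with float
# masks, and also writes the boolean masks (x >= tr) and (x <= tl) to separate
# buffers so the backward pass can reuse them.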
@triton.jit
def triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 >= tmp1
tmp4 = tmp0 <= tmp3
tmp5 = tmp2.to(tl.float32)
tmp7 = tmp0 + tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp1 + tmp8
tmp10 = tmp5 * tmp9
tmp11 = tmp0 < tmp1
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp0 > tmp3
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 * tmp14
tmp16 = tmp15 * tmp0
tmp17 = tmp10 + tmp16
tmp18 = tmp4.to(tl.float32)
tmp20 = tmp0 + tmp3
tmp21 = tmp19 * tmp20
tmp22 = tmp3 + tmp21
tmp23 = tmp18 * tmp22
tmp24 = tmp17 + tmp23
tl.store(out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr1 + x2, tmp4, xmask)
tl.store(out_ptr2 + x2, tmp24, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_add_ge_gt_le_lt_mul_0[grid(256)](primals_2,
primals_1, primals_4, primals_3, primals_5, buf0, buf1, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
return (buf2, primals_1, primals_2, primals_3, primals_4, primals_5,
buf0, buf1)
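# SReLUNew holds the same four parameter tensors as SReLU but routes its
# forward pass through the compiled call() above instead of eager ops.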
class SReLUNew(nn.Module):
"""
    SReLU (S-shaped Rectified Linear Activation Unit): a combination of three linear functions, which together perform a mapping R → R with the following formulation:
.. math::
h(x_i) = \\left\\{\\begin{matrix} t_i^r + a_i^r(x_i - t_i^r), x_i \\geq t_i^r \\\\ x_i, t_i^r > x_i > t_i^l\\\\ t_i^l + a_i^l(x_i - t_i^l), x_i \\leq t_i^l \\\\ \\end{matrix}\\right.
with 4 trainable parameters.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
.. math:: \\{t_i^r, a_i^r, t_i^l, a_i^l\\}
4 trainable parameters, which model an individual SReLU activation unit. The subscript i indicates that we allow SReLU to vary in different channels. Parameters can be initialized manually or randomly.
References:
- See SReLU paper:
https://arxiv.org/pdf/1512.07030.pdf
Examples:
        >>> srelu_activation = SReLUNew((2,2))
>>> t = torch.randn((2,2), dtype=torch.float, requires_grad = True)
>>> output = srelu_activation(t)
"""
def __init__(self, in_features, parameters=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- parameters: (tr, tl, ar, al) parameters for manual initialization, default value is None. If None is passed, parameters are initialized randomly.
"""
super(SReLUNew, self).__init__()
self.in_features = in_features
if parameters is None:
self.tr = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.tl = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.ar = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.al = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
else:
self.tr, self.tl, self.ar, self.al = parameters
def forward(self, input_0):
primals_1 = self.tr
primals_3 = self.tl
primals_4 = self.ar
primals_5 = self.al
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Venkateshwar2506/Echo | SReLU | false | 1,189 | [
"MIT"
] | 0 | 5d236b25ee4900754f48e0a865e1bf1ae9183875 | https://github.com/Venkateshwar2506/Echo/tree/5d236b25ee4900754f48e0a865e1bf1ae9183875 | import torch
from torch import nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
"""
    SReLU (S-shaped Rectified Linear Activation Unit): a combination of three linear functions, which together perform a mapping R → R with the following formulation:
.. math::
h(x_i) = \\left\\{\\begin{matrix} t_i^r + a_i^r(x_i - t_i^r), x_i \\geq t_i^r \\\\ x_i, t_i^r > x_i > t_i^l\\\\ t_i^l + a_i^l(x_i - t_i^l), x_i \\leq t_i^l \\\\ \\end{matrix}\\right.
with 4 trainable parameters.
Shape:
        - Input: (N, *) where * means any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
.. math:: \\{t_i^r, a_i^r, t_i^l, a_i^l\\}
4 trainable parameters, which model an individual SReLU activation unit. The subscript i indicates that we allow SReLU to vary in different channels. Parameters can be initialized manually or randomly.
References:
- See SReLU paper:
https://arxiv.org/pdf/1512.07030.pdf
Examples:
        >>> srelu_activation = Model((2,2))
>>> t = torch.randn((2,2), dtype=torch.float, requires_grad = True)
>>> output = srelu_activation(t)
"""
def __init__(self, in_features, parameters=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- parameters: (tr, tl, ar, al) parameters for manual initialization, default value is None. If None is passed, parameters are initialized randomly.
"""
super().__init__()
self.in_features = in_features
if parameters is None:
self.tr = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.tl = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.ar = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
self.al = Parameter(torch.randn(in_features, dtype=torch.float,
requires_grad=True))
else:
self.tr, self.tl, self.ar, self.al = parameters
def forward(self, x):
"""
Forward pass of the function
"""
return (x >= self.tr).float() * (self.tr + self.ar * (x + self.tr)) + (
x < self.tr).float() * (x > self.tl).float() * x + (x <= self.tl
).float() * (self.tl + self.al * (x + self.tl))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
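# Usage sketch (illustrative, not part of the original source):
# m = Model(4)                   # four trainable (tr, tl, ar, al) values,
#                                # broadcast over the input's last dimension
# y = m(torch.rand(4, 4, 4, 4))  # output has the same shape as the input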
|
ResnetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ar/carphsywpk5bgy7r2tzdgvw3mvatf5msx5wznvmeu6brbiqy5y5e.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# out_1 => _low_memory_max_pool2d_with_offsets, getitem_1
# out_2 => relu
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
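# Note: this kernel fuses a 3x3 / stride-2 / pad-1 max pool (recording argmax
# offsets for the backward pass) with a ReLU applied to the pooled value.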
triton_poi_fused_max_pool2d_with_indices_relu_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x4 = (xindex // 2)
x3 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x4)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x4)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x4)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x4)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x4)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x4)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x4)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x4)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x4)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.full([1], 0, tl.int32)
tmp78 = triton_helpers.maximum(tmp77, tmp51)
tl.store(out_ptr0 + (x3), tmp76, xmask)
tl.store(in_out_ptr0 + (x3), tmp78, xmask)
''', device_str='cuda')
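# A minimal eager-mode sketch (ours, not Inductor output) of what the fused
# kernel above computes: a 3x3, stride-2, padding-1 max pool whose winning
# window position the kernel records as an int8 offset in 0..8, followed by
# ReLU. Note torch's return_indices gives flat input indices, not 0..8.
def _reference_maxpool_relu(x):
    import torch.nn.functional as F
    pooled, idx = F.max_pool2d(x, kernel_size=3, stride=2, padding=1,
                               return_indices=True)
    return F.relu(pooled), idx  # pooled+relu ~ buf3; idx plays the role of buf2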
# kernel path: runs/run_shard_6/inductor_cache/wu/cwuhjqrxupg6y2xkpm7lucxrqveop2y2vttpjm35nj72mklqdbod.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
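# Eager analogue (ours) of triton_poi_fused_relu_1 above: an in-place
# elementwise ReLU, x = max(0, x), applied over all 64 values.
def _reference_relu_(x):
    return x.clamp_min_(0)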
# kernel path: runs/run_shard_6/inductor_cache/jq/cjq7l4v5omlfav6zrzt3gv7wppwv3n4lqupgabiefuwarylj456k.py
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# out_4 => add
# out_5 => relu_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %relu), kwargs = {})
# %relu_2 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
triton_poi_fused_add_relu_2 = async_compile.triton('triton_poi_fused_add_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
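# Eager analogue (ours) of triton_poi_fused_add_relu_2 above: the residual
# add `out + identity` and the following ReLU, fused into one in-place pass.
def _reference_add_relu_(out, identity):
    return out.add_(identity).clamp_min_(0)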
# kernel path: runs/run_shard_6/inductor_cache/f5/cf5hufnimi2lmysvvc6zuqucotowluvb3zukslqhkjvxlz5ra3u7.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_7 => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %relu_2), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_relu_0.run(buf3, buf0, buf2, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf5, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.relu]
triton_poi_fused_add_relu_2.run(buf7, buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 2, 2), (16, 4, 2, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf9, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf11, buf7, 64, grid=grid(64), stream=stream0)
return (buf11, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf2, buf3, buf5, buf7, buf9, )
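# Note (ours): call() returns the block output first (buf11); every other
# tensor in the tuple is an input or intermediate kept alive for the backward
# pass. Minimal usage sketch, assuming CUDA weights and input of the asserted
# shapes above (argument order matches primals_1..primals_6):
def _run_compiled(w1, x, w2, w3, w4, w5):
    return call([w1, x, w2, w3, w4, w5])[0]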
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ResnetBlock(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False):
super(ResnetBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.use_bn = use_bn
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv4 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv5 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
if self.use_bn:
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.bn4 = nn.BatchNorm2d(out_channels)
self.bn5 = nn.BatchNorm2d(out_channels)
def forward(self, x):
out = self.conv1(x)
if self.use_bn:
            out = self.bn1(out)
out = self.pool1(out)
identity = out
out = self.relu(out)
if self.use_bn:
out = self.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out)) + identity
else:
out = self.relu(self.conv2(out))
out = self.conv3(out) + identity
identity = out
out = self.relu(out)
if self.use_bn:
out = self.relu(self.bn4(self.conv4(out)))
out = self.bn5(self.conv5(out)) + identity
else:
out = self.relu(self.conv4(out))
out = self.conv5(out) + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
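# Quick usage sketch (ours): with a 4x4 input, the stride-2 pool halves the
# spatial size, so the block maps (4, 4, 4, 4) -> (4, 4, 2, 2).
def _demo_resnet_block():
    block = ResnetBlock(4, 4)
    y = block(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 2, 2)
    return y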
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x4 = xindex // 2
x3 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x4), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x4), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x4), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x4), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x4), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.full([1], 0, tl.int32)
tmp78 = triton_helpers.maximum(tmp77, tmp51)
tl.store(out_ptr0 + x3, tmp76, xmask)
tl.store(in_out_ptr0 + x3, tmp78, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_relu_0[grid(64)](buf3,
buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_1[grid(64)](buf5, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
buf7 = buf6
del buf6
triton_poi_fused_add_relu_2[grid(64)](buf7, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 2, 2), (16, 4, 2, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_1[grid(64)](buf9, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1))
buf11 = buf10
del buf10
triton_poi_fused_add_3[grid(64)](buf11, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return (buf11, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf0, buf2, buf3, buf5, buf7, buf9)
class ResnetBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False):
super(ResnetBlockNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.use_bn = use_bn
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv4 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv5 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
if self.use_bn:
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.bn4 = nn.BatchNorm2d(out_channels)
self.bn5 = nn.BatchNorm2d(out_channels)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
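# Hedged parity sketch (ours): with use_bn=False the compiled wrapper should
# match the eager ResnetBlock from the listing above, given shared weights
# and a (4, 4, 4, 4) CUDA input. Assumes the eager class is in scope.
def _check_resnet_parity(x):
    eager = ResnetBlock(4, 4).cuda()
    compiled = ResnetBlockNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    return torch.allclose(eager(x), compiled(x), atol=1e-5)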
| VashishtMadhavan/pytorch-maml-rl | ResnetBlock | false | 1,190 | [
"MIT"
] | 0 | d8821b8374d973869bb6a1393f1b2c369c9a664b | https://github.com/VashishtMadhavan/pytorch-maml-rl/tree/d8821b8374d973869bb6a1393f1b2c369c9a664b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.use_bn = use_bn
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv4 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv5 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
if self.use_bn:
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.bn4 = nn.BatchNorm2d(out_channels)
self.bn5 = nn.BatchNorm2d(out_channels)
def forward(self, x):
out = self.conv1(x)
if self.use_bn:
            out = self.bn1(out)
out = self.pool1(out)
identity = out
out = self.relu(out)
if self.use_bn:
out = self.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out)) + identity
else:
out = self.relu(self.conv2(out))
out = self.conv3(out) + identity
identity = out
out = self.relu(out)
if self.use_bn:
out = self.relu(self.bn4(self.conv4(out)))
out = self.bn5(self.conv5(out)) + identity
else:
out = self.relu(self.conv4(out))
out = self.conv5(out) + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ODEfunc | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ew/cewlcpr2jhkktbpmzbbjxdsiykdntmypm237lc34qynaxm2ln5ee.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# out => add, add_1, mul_1, rsqrt, var_mean
# out_1 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x2) + (80*x3)), tmp29, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
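# Eager sketch (ours) of the fused kernel above: GroupNorm(4, 4) -- four
# groups of one channel each, so mean/variance are taken over the 16 spatial
# values of every (sample, channel) pair (hence rnumel = 16) -- followed by
# ReLU, written into channels 1..4 of a preallocated 5-channel buffer.
def _reference_groupnorm_relu(x, weight, bias):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, 4, weight=weight, bias=bias, eps=1e-05))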
# kernel path: runs/run_shard_6/inductor_cache/yl/cyltj4xe7bwa5jmotmsxfdzwedvvrytkhaf3f2qw62sd4zn5rnro.py
# Topologically Sorted Source Nodes: [ttx, ttx_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ttx => cat
# ttx_1 => cat_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %relu], 1), kwargs = {})
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask)
tl.store(out_ptr1 + (x0 + (80*x1)), tmp0, xmask)
''', device_str='cuda')
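# Note (ours): this kernel materializes torch.cat([tt, x], 1) without a real
# concat -- the time tensor is copied into channel 0 of two preallocated
# (4, 5, 4, 4) buffers whose channels 1..4 the norm kernels fill in place.
# Eager equivalent, assuming tt has already been broadcast to (N, 1, H, W):
def _reference_cat(tt, x):
    return torch.cat([tt, x], 1)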
# kernel path: runs/run_shard_6/inductor_cache/mr/cmr56lkwxw77qikvfa54yx4b56plsu5zod4pwpjjr4x2wgpvy3h6.py
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# out_2 => convolution
# out_3 => add_2, add_3, mul_4, rsqrt_1, var_mean_1
# out_4 => relu_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %unsqueeze_8), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_3,), kwargs = {})
triton_per_fused_convolution_native_group_norm_relu_2 = async_compile.triton('triton_per_fused_convolution_native_group_norm_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_relu_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_native_group_norm_relu_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr1 + (r2 + (16*x0) + (80*x1)), tmp31, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
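# Eager sketch (ours) of the fusion above: the extern convolution runs
# bias-free, and this kernel folds the bias add into the same pass as the
# GroupNorm statistics and the ReLU.
def _reference_bias_gn_relu(conv_out, conv_bias, weight, bias):
    import torch.nn.functional as F
    y = conv_out + conv_bias.view(1, -1, 1, 1)
    return F.relu(F.group_norm(y, 4, weight=weight, bias=bias, eps=1e-05))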
# kernel path: runs/run_shard_6/inductor_cache/lj/cljbnzt4e5mf4f235sbsd7nao5p35wmgsn35efjytvld4hyxvgz4.py
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.native_group_norm]
# Source node to ATen node mapping:
# out_5 => convolution_1
# out_6 => add_4, add_5, mul_7, rsqrt_2, var_mean_2
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_17), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_14), kwargs = {})
triton_per_fused_convolution_native_group_norm_3 = async_compile.triton('triton_per_fused_convolution_native_group_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_native_group_norm_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp29, xmask)
tl.store(out_ptr3 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_5, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf5 = reinterpret_tensor(buf6, (4, 4, 4, 4), (80, 16, 4, 1), 16) # alias
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(buf3, primals_3, primals_1, primals_2, buf0, buf5, 16, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf6, (4, 1, 4, 4), (80, 16, 4, 1), 0) # alias
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [ttx, ttx_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_4, buf4, buf13, 64, grid=grid(64), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf7; del buf7 # reuse
buf9 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf10 # reuse
buf14 = reinterpret_tensor(buf15, (4, 4, 4, 4), (80, 16, 4, 1), 16) # alias
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.native_group_norm, aten.relu]
triton_per_fused_convolution_native_group_norm_relu_2.run(buf8, buf12, primals_6, primals_7, primals_8, buf9, buf14, 16, 16, grid=grid(16), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1))
buf17 = buf16; del buf16 # reuse
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.native_group_norm]
triton_per_fused_convolution_native_group_norm_3.run(buf17, primals_10, primals_11, primals_12, buf18, buf21, buf22, 16, 16, grid=grid(16), stream=stream0)
del primals_10
del primals_12
return (buf21, primals_1, primals_2, primals_3, primals_5, primals_7, primals_8, primals_9, primals_11, buf0, buf3, buf6, buf8, buf9, buf12, buf15, buf17, reinterpret_tensor(buf18, (4, 4), (4, 1), 0), reinterpret_tensor(buf22, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 5, 3, 3), (45, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 5, 3, 3), (45, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
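# Usage sketch (ours): t may be a scalar or a broadcastable tensor; it is
# expanded into a constant feature map and stacked onto x as an extra input
# channel before the convolution, so the layer sees dim_in + 1 channels.
def _demo_concat_conv():
    layer = ConcatConv2d(4, 4, 3, 1, 1)
    y = layer(0.5, torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 4, 4)
    return y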
class ODEfunc(nn.Module):
def __init__(self, dim):
super(ODEfunc, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU(inplace=True)
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(t, out)
out = self.norm3(out)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
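# Usage sketch (ours): this module is the right-hand side f(t, x) handed to
# an ODE solver; each forward call bumps the nfe (function evaluation) count.
def _demo_odefunc():
    func = ODEfunc(4)
    dx = func(torch.rand(4, 1, 4, 4), torch.rand(4, 4, 4, 4))
    assert dx.shape == (4, 4, 4, 4) and func.nfe == 1
    return dx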
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x2 + 80 * x3), tmp29, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
tl.store(out_ptr1 + (x0 + 80 * x1), tmp0, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_relu_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr1 + (r2 + 16 * x0 + 80 * x1), tmp31, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_3(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp29, xmask)
tl.store(out_ptr3 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_5, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf5 = reinterpret_tensor(buf6, (4, 4, 4, 4), (80, 16, 4, 1), 16)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf3, primals_3,
primals_1, primals_2, buf0, buf5, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
buf4 = reinterpret_tensor(buf6, (4, 1, 4, 4), (80, 16, 4, 1), 0)
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(64)](primals_4, buf4, buf13, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf12 = reinterpret_tensor(buf10, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf10
buf14 = reinterpret_tensor(buf15, (4, 4, 4, 4), (80, 16, 4, 1), 16)
triton_per_fused_convolution_native_group_norm_relu_2[grid(16)](buf8,
buf12, primals_6, primals_7, primals_8, buf9, buf14, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
buf16 = extern_kernels.convolution(buf15, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1))
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_convolution_native_group_norm_3[grid(16)](buf17,
primals_10, primals_11, primals_12, buf18, buf21, buf22, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_10
del primals_12
return (buf21, primals_1, primals_2, primals_3, primals_5, primals_7,
primals_8, primals_9, primals_11, buf0, buf3, buf6, buf8, buf9,
buf12, buf15, buf17, reinterpret_tensor(buf18, (4, 4), (4, 1), 0),
reinterpret_tensor(buf22, (4, 4), (4, 1), 0))
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
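        # Broadcast t to an extra (N, 1, H, W) channel and concatenate it
        # to x, so the convolution can condition on time.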
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class ODEfuncNew(nn.Module):
def __init__(self, dim):
super(ODEfuncNew, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU(inplace=True)
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
self.nfe = 0
def forward(self, input_0, input_1):
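        # input_0 is the time tensor t (N, 1, H, W) and input_1 the state x
        # (N, C, H, W); see the shape asserts on primals_4 and primals_3 in call().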
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_5 = self.conv1._layer.weight
primals_6 = self.conv1._layer.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.conv2._layer.weight
primals_10 = self.conv2._layer.bias
primals_11 = self.norm3.weight
primals_12 = self.norm3.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| TylerChoi1224/torchdiffeq | ODEfunc | false | 1,191 | [
"MIT"
] | 0 | 72f74d9651a58ab11cdadd60682f1b61e625ef53 | https://github.com/TylerChoi1224/torchdiffeq/tree/72f74d9651a58ab11cdadd60682f1b61e625ef53 | import torch
import torch.nn as nn
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super().__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU(inplace=True)
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(t, out)
out = self.norm3(out)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
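    # 4096 = 64*64 spatial positions per channel; x1 selects one of the
    # 32 output channels so the matching bias element is added below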
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qn/cqnqaq7kh6sypugb6bqfg74kezlshfvip2ipwvaogffif2deremo.py
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_3 => convolution_1
# input_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5p/c5p5qppzecxcvl2fokkcfo32s4cs6s4z3s2aeii7lx2jg75iaktc.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_5 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.layers = nn.Sequential()
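        # 64 -> 32 -> 16 -> 1 channels; 3x3 convs with stride 1 and padding 1
        # keep the 64x64 spatial resolution unchanged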
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, x):
return self.layers(x)
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(262144)](buf3, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class DecoderNew(nn.Module):
def __init__(self):
super(DecoderNew, self).__init__()
self.layers = nn.Sequential()
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, input_0):
primals_1 = self.layers.Conv3.weight
primals_2 = self.layers.Conv3.bias
primals_4 = self.layers.Conv4.weight
primals_5 = self.layers.Conv4.bias
primals_6 = self.layers.Conv5.weight
primals_7 = self.layers.Conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| VarunBabbar/Image_Compressor | Decoder | false | 1,192 | [
"MIT"
] | 0 | 254d8d411f7cd16f3ce242275532c9fca537269c | https://github.com/VarunBabbar/Image_Compressor/tree/254d8d411f7cd16f3ce242275532c9fca537269c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential()
self.layers.add_module('Conv3', nn.Conv2d(64, 32, 3, 1, 1))
self.layers.add_module('Act3', nn.ReLU(inplace=True))
self.layers.add_module('Conv4', nn.Conv2d(32, 16, 3, 1, 1))
self.layers.add_module('Act4', nn.ReLU(inplace=True))
self.layers.add_module('Conv5', nn.Conv2d(16, 1, 3, 1, 1))
def forward(self, x):
return self.layers(x)
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return []
|
SEBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# attn => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
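    # tmp6 is the mean over the 16 (= 4x4) spatial elements: this kernel
    # implements the AdaptiveAvgPool2d(1) "squeeze" step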
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py
# Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# attn_1 => convolution
# attn_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4y/c4yo7julccljbsmoos6bmocbalgc5mg5loxwifzjugvih4yntd2h.py
# Topologically Sorted Source Nodes: [attn_3, mul, x, neg, result, neg_1, result_1], Original ATen: [aten.convolution, aten.mul, aten.add, aten.neg, aten.threshold]
# Source node to ATen node mapping:
# attn_3 => convolution_1
# mul => mul
# neg => neg
# neg_1 => neg_1
# result => full_default, le, where
# result_1 => le_1
# x => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {})
# %le : [num_users=2] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg, -1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %neg), kwargs = {})
# %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%where,), kwargs = {})
# %le_1 : [num_users=2] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg_1, 0), kwargs = {})
triton_poi_fused_add_convolution_mul_neg_threshold_2 = async_compile.triton('triton_poi_fused_add_convolution_mul_neg_threshold_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_neg_threshold_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_neg_threshold_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = 0.5
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tmp8 = -1.0
tmp9 = tmp7 <= tmp8
tmp10 = tl.where(tmp9, tmp8, tmp7)
tmp11 = -tmp10
tmp12 = 0.0
tmp13 = tmp11 <= tmp12
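    # tmp4..tmp13 evaluate hard-sigmoid clamp(0.2*x + 0.5, 0, 1) via two
    # negate-and-threshold steps; the boolean clamp masks are stored so the
    # next kernel (and the backward pass) can reuse them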
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wl/cwll37wdmbam3jlbpjfmh3gdmu7emnjjcxx4gsktivq2xh2ulr2s.py
# Topologically Sorted Source Nodes: [attn_3, mul, x, neg, result, neg_1, result_1, mul_1], Original ATen: [aten.convolution, aten.mul, aten.add, aten.neg, aten.threshold]
# Source node to ATen node mapping:
# attn_3 => convolution_1
# mul => mul
# mul_1 => mul_1
# neg => neg
# neg_1 => neg_1
# result => full_default, where
# result_1 => full_default_1, where_1
# x => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %neg), kwargs = {})
# %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%where,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le_1, %full_default_1, %neg_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %where_1), kwargs = {})
triton_poi_fused_add_convolution_mul_neg_threshold_3 = async_compile.triton('triton_poi_fused_add_convolution_mul_neg_threshold_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_neg_threshold_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_neg_threshold_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp3 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tmp3 + tmp4
tmp6 = 0.2
tmp7 = tmp5 * tmp6
tmp8 = 0.5
tmp9 = tmp7 + tmp8
tmp10 = -tmp9
tmp11 = -1.0
tmp12 = tl.where(tmp2, tmp11, tmp10)
tmp13 = -tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tmp16 = tmp0 * tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [attn_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [attn_3, mul, x, neg, result, neg_1, result_1], Original ATen: [aten.convolution, aten.mul, aten.add, aten.neg, aten.threshold]
triton_poi_fused_add_convolution_mul_neg_threshold_2.run(buf4, primals_5, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_3, mul, x, neg, result, neg_1, result_1, mul_1], Original ATen: [aten.convolution, aten.mul, aten.add, aten.neg, aten.threshold]
triton_poi_fused_add_convolution_mul_neg_threshold_3.run(primals_1, buf6, buf5, buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del buf4
del primals_5
return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class HardSigmoid(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
x = self.slope * x + self.offset
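        # Two negate-and-threshold steps implement clamp(x, 0, 1):
        # F.threshold(-x, -1, -1) caps x at 1, the second call floors it at 0.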
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
class SEBlock(nn.Module):
def __init__(self, in_channels, out_channels, ratio=4):
super().__init__()
num_mid_filter = out_channels // ratio
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
num_mid_filter, kernel_size=1, bias=True)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1,
out_channels=out_channels, bias=True)
self.relu2 = HardSigmoid()
def forward(self, x):
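        # Squeeze-and-Excitation: global average pool, two 1x1 convs with a
        # bottleneck of out_channels // ratio, then channel-wise rescaling of x.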
attn = self.pool(x)
attn = self.conv1(attn)
attn = self.relu1(attn)
attn = self.conv2(attn)
attn = self.relu2(attn)
return x * attn
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_neg_threshold_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = 0.5
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tmp8 = -1.0
tmp9 = tmp7 <= tmp8
tmp10 = tl.where(tmp9, tmp8, tmp7)
tmp11 = -tmp10
tmp12 = 0.0
tmp13 = tmp11 <= tmp12
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_neg_threshold_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp3 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp5 = tmp3 + tmp4
tmp6 = 0.2
tmp7 = tmp5 * tmp6
tmp8 = 0.5
tmp9 = tmp7 + tmp8
tmp10 = -tmp9
tmp11 = -1.0
tmp12 = tl.where(tmp2, tmp11, tmp10)
tmp13 = -tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tmp16 = tmp0 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_mul_neg_threshold_2[grid(16)](buf4,
primals_5, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_neg_threshold_3[grid(256)](
primals_1, buf6, buf5, buf4, primals_5, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
del primals_5
return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf6
class HardSigmoid(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
x = self.slope * x + self.offset
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
class SEBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, ratio=4):
super().__init__()
num_mid_filter = out_channels // ratio
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
num_mid_filter, kernel_size=1, bias=True)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1,
out_channels=out_channels, bias=True)
self.relu2 = HardSigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Vivianyzw/Dual.DBNet.pytorch | SEBlock | false | 1,193 | [
"Apache-2.0",
"MIT"
] | 0 | 19d823ed7c05076c087a3f7ad1127c71c1c0d692 | https://github.com/Vivianyzw/Dual.DBNet.pytorch/tree/19d823ed7c05076c087a3f7ad1127c71c1c0d692 | import torch
from torch import nn
import torch.nn.functional as F
class HardSigmoid(nn.Module):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
x = self.slope * x + self.offset
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
class Model(nn.Module):
def __init__(self, in_channels, out_channels, ratio=4):
super().__init__()
num_mid_filter = out_channels // ratio
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
num_mid_filter, kernel_size=1, bias=True)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1,
out_channels=out_channels, bias=True)
self.relu2 = HardSigmoid()
def forward(self, x):
attn = self.pool(x)
attn = self.conv1(attn)
attn = self.relu1(attn)
attn = self.conv2(attn)
attn = self.relu2(attn)
return x * attn
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
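    # tmp7 = max over the 4 class logits (stride 16 along dim 1); subtracting
    # it makes the subsequent log-softmax numerically stable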
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4s/c4sinl6spiqxhpvyucixluedc7ia6rt4kmscnsrfq6ef6uymhfpa.py
# Topologically Sorted Source Nodes: [cross_entropy, mul], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2
# mul => mul_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
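    # 0.015625 == 1/64: CrossEntropyLoss averages over the 4*4*4 = 64
    # (batch, H, W) positions of this input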
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, mul], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CELoss(nn.Module):
""" Cross Entorpy Loss Wrapper
Args:
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
self.criterion = nn.CrossEntropyLoss()
def forward(self, output, target):
"""Forward function."""
return self.loss_weight * self.criterion(output, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
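# Illustrative usage sketch (added for clarity, not from the source repo).
# nn.CrossEntropyLoss with a floating-point target of the same shape treats
# dim 1 as class probabilities, so the wrapper can be exercised like this:
def example_celoss_usage():
    loss_fn = CELoss(loss_weight=0.5)
    logits = torch.rand(4, 4, 4, 4)
    target = torch.rand(4, 4, 4, 4).softmax(dim=1)  # soft labels over dim 1
    return loss_fn(logits, target)  # 0-dim tensor, scaled by loss_weight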
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class CELossNew(nn.Module):
""" Cross Entorpy Loss Wrapper
Args:
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
self.criterion = nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
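# Note (added commentary): CELossNew never calls self.criterion at runtime;
# the Triton kernels above reproduce log_softmax plus the weighted NLL
# reduction directly, with the default loss_weight of 1.0 folded into the
# fused kernel as the trailing multiply by 1.0.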
| WangXin93/mmpose | CELoss | false | 1,194 | [
"Apache-2.0"
] | 0 | 28b6e9ac2f6ed195ab27fb04da2213fc885a5994 | https://github.com/WangXin93/mmpose/tree/28b6e9ac2f6ed195ab27fb04da2213fc885a5994 | import torch
import torch.nn as nn
class Model(nn.Module):
""" Cross Entorpy Loss Wrapper
Args:
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
self.criterion = nn.CrossEntropyLoss()
def forward(self, output, target):
"""Forward function."""
return self.loss_weight * self.criterion(output, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AttentionPool2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [0], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
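# Reader note (added commentary, not emitted by Inductor): despite its name,
# this kernel only accumulates the sum over the 16 spatial positions; the
# division by 16.0 that completes the mean is deferred to the fused cat/add
# kernel below (tmp7 = tmp5 / tmp6 with tmp6 = 16.0).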
# kernel path: runs/run_shard_6/inductor_cache/3y/c3yxwtlf7q4oo5ov7royd257tecdxmuzqqag2za7buohahsb7mfs.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# x_1 => cat
# x_2 => add
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %permute],), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {})
triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 17, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/uc/cucgrga44gtlwzw6ehoy4jrwzm5fghn3ljf7iugmdjhe6m7mjcas.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_6, %primals_7, %primals_8],), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((-8) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gq/cgqswernq7km7s5e6iqvidwhg6nuo5b5zp6m5py3pnbrdzepe236.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_10, 1.0), kwargs = {})
# %permute_17 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_13, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_3 = async_compile.triton('triton_poi_fused_mul_transpose_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-8) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qz/cqzbvlh3tf752q5rj44eh5o54zbmctvajzgosqadhjdb5lbyqjy2.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_7, 1.0), kwargs = {})
# %permute_18 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_14, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_4 = async_compile.triton('triton_poi_fused_mul_transpose_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = 4 + y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/q4/cq45jdoeuvgpxv4zofa7tafzriebamhh2wkhnkrmvo3v75s7gbkj.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, any_1, div, eq, exp, full_default, logical_not, logical_not_1, sub, sum_1, where
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_15, -inf), kwargs = {})
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {})
# %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {})
# %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 17, 17], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {})
triton_per_fused__safe_softmax_5 = async_compile.triton('triton_per_fused__safe_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = (xindex // 68)
tmp0 = tl.load(in_ptr0 + (r1 + (17*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float("-inf")
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = (tmp14 != 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + (17*x2) + (1184*x3)), tmp23, rmask & xmask)
''', device_str='cuda')
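# Reader note (added commentary, not emitted by Inductor): _safe_softmax
# differs from plain softmax in one respect: rows whose entries are all -inf
# (fully masked attention rows) are replaced with zeros (tmp20/tmp23) instead
# of producing NaN from 0/0.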
# kernel path: runs/run_shard_6/inductor_cache/hp/chpbwsfm77p7craq5ckxpabfykbcahcp5xaoqi5y3wp6bfgatgpn.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# multi_head_attention_forward => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_16, %view_17), kwargs = {})
triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = (xindex // 289)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (289*(x1 % 4)) + (1184*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qb/cqb3dcbu2gzzir2rbde24vaunu7kucgqq3ftpydndkry6kdjyyqy.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (17*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_6, primals_7, primals_8, buf4, 12, grid=grid(12), stream=stream0)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(buf4, (4, ), (1, ), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_3.run(buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, grid=grid(16, 17), stream=stream0)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0); del buf2 # reuse
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_4.run(buf3, primals_6, primals_7, primals_8, buf7, buf18, 16, 17, grid=grid(16, 17), stream=stream0)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
triton_per_fused__safe_softmax_5.run(buf8, buf12, 272, 17, grid=grid(272), stream=stream0)
buf13 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf12, buf13, 4624, grid=grid(4624), stream=stream0)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf14, buf15, 17, 16, grid=grid(17, 16), stream=stream0)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
return (reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((17, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
import torch.distributed.nn
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :]
x, _ = F.multi_head_attention_forward(query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1], num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.
weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias,
self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=
False, dropout_p=0, out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True,
training=self.training, need_weights=False)
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
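# Illustrative usage sketch (added for clarity, not from the source repo).
# The pool flattens HxW into a length-16 sequence, prepends the mean token
# as the query position, and returns the attended features for that token:
def example_attention_pool_usage():
    pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4)
    x = torch.rand(4, 4, 4, 4)  # (N, C, H, W) with H * W == spacial_dim ** 2
    return pool(x)  # shape (N, embed_dim), or (N, output_dim) if given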
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.distributed.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = 4 + y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]),
tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = xindex // 68
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = xindex // 289
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12,), (1,), torch.float32)
triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8,
buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8),
reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta
=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6,
primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0)
del buf2
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6,
primals_7, primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=16,
num_warps=4, num_stages=1)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1),
torch.float32)
triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17,
XBLOCK=32, num_warps=8, num_stages=1)
buf13 = buf8
del buf8
triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256,
num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0)
del buf7
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1,
16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK
=16, YBLOCK=32, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0)
del buf14
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(
buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1,
16), 0), buf17, buf18, primals_5, primals_4, primals_3
class AttentionPool2dNew(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.k_proj.weight
primals_6 = self.k_proj.bias
primals_4 = self.q_proj.weight
primals_7 = self.q_proj.bias
primals_5 = self.v_proj.weight
primals_8 = self.v_proj.bias
primals_9 = self.c_proj.weight
primals_10 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| Vaishaal/open_clip | AttentionPool2d | false | 1,195 | [
"MIT"
] | 0 | 8877c4036dacde022da90769c64006d9f2c82e84 | https://github.com/Vaishaal/open_clip/tree/8877c4036dacde022da90769c64006d9f2c82e84 | import torch
import torch.nn.functional as F
from torch import nn
import torch.distributed.nn
class Model(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :]
x, _ = F.multi_head_attention_forward(query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1], num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.
weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias,
self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=
False, dropout_p=0, out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True,
training=self.training, need_weights=False)
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
TemperatureScaleTrainer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/at/catwgbiwmbg7xppf43cvbcn5d6non7ron2vvi5h77ptca3ltb2j6.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
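# Hedged sanity check (not part of the generated module): when a CUDA device
# is present, the compiled graph above should agree with eager elementwise
# division by the scalar temperature.
def _check_div_kernel():
    if not torch.cuda.is_available():
        return
    t = torch.ones(1, device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, *_ = call([t, x])
    assert torch.allclose(out, x / t)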
| import torch
import torch.nn as nn
class TemperatureScaleTrainer(nn.Module):
def __init__(self):
super(TemperatureScaleTrainer, self).__init__()
self._temperature = nn.Parameter(torch.ones(1), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() device-transfer call
            self  # no-op: likely a stripped .cuda() device-transfer call
def forward(self, logits: 'torch.Tensor'):
expanded_temperature = self._temperature.unsqueeze(1).expand(logits
.size(0), logits.size(1))
        if torch.cuda.is_available():
            expanded_temperature = expanded_temperature  # no-op self-assignment: likely a stripped .cuda() call
            logits  # no-op bare expression: likely a stripped .cuda() call
return logits / expanded_temperature
def get_parameters(self):
return self._temperature
def get_temperature(self):
return self._temperature.item()
def set_temperature(self, t: 'float'):
self._temperature = nn.Parameter(torch.tensor([t]), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() call
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
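# Calibration sketch (hypothetical logits/labels; LBFGS is the optimizer
# conventionally used for temperature scaling, cf. Guo et al. 2017):
def _fit_temperature():
    trainer = TemperatureScaleTrainer()
    logits = torch.randn(32, 10)
    labels = torch.randint(0, 10, (32,))
    opt = torch.optim.LBFGS([trainer.get_parameters()], lr=0.01, max_iter=50)
    nll = nn.CrossEntropyLoss()
    def closure():
        opt.zero_grad()
        loss = nll(trainer(logits), labels)
        loss.backward()
        return loss
    opt.step(closure)
    return trainer.get_temperature()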
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class TemperatureScaleTrainerNew(nn.Module):
def __init__(self):
super(TemperatureScaleTrainerNew, self).__init__()
self._temperature = nn.Parameter(torch.ones(1), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() device-transfer call
            self  # no-op: likely a stripped .cuda() device-transfer call
def get_parameters(self):
return self._temperature
def get_temperature(self):
return self._temperature.item()
def set_temperature(self, t: 'float'):
self._temperature = nn.Parameter(torch.tensor([t]), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() call
def forward(self, input_0):
primals_1 = self._temperature
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
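# Hedged equivalence check (CUDA only): the compiled forward should reproduce
# plain division of the logits by the scalar temperature.
def _check_compiled_forward():
    if not torch.cuda.is_available():
        return
    m = TemperatureScaleTrainerNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), x / m._temperature)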
| Kageshimasu/temperature-scaling-optimizer | TemperatureScaleTrainer | false | 1,196 | [
"MIT"
] | 0 | 3af562e6c3fefef97aec0431d08b8e8275d275c7 | https://github.com/Kageshimasu/temperature-scaling-optimizer/tree/3af562e6c3fefef97aec0431d08b8e8275d275c7 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self._temperature = nn.Parameter(torch.ones(1), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() device-transfer call
            self  # no-op: likely a stripped .cuda() device-transfer call
def forward(self, logits: 'torch.Tensor'):
expanded_temperature = self._temperature.unsqueeze(1).expand(logits
.size(0), logits.size(1))
        if torch.cuda.is_available():
            expanded_temperature = expanded_temperature  # no-op self-assignment: likely a stripped .cuda() call
            logits  # no-op bare expression: likely a stripped .cuda() call
return logits / expanded_temperature
def get_parameters(self):
return self._temperature
def get_temperature(self):
return self._temperature.item()
def set_temperature(self, t: 'float'):
self._temperature = nn.Parameter(torch.tensor([t]), requires_grad=True)
        if torch.cuda.is_available():
            self._temperature  # no-op: likely a stripped .cuda() call
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ELUPlus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/y4/cy4n3trndgzbyxuh7lxf5gowezusvtxrwhp6q3fl6sve7pr6sqky.py
# Topologically Sorted Source Nodes: [elu, add], Original ATen: [aten.elu, aten.add]
# Source node to ATen node mapping:
# add => add
# elu => expm1, gt, mul, mul_1, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 1.0), kwargs = {})
triton_poi_fused_add_elu_0 = async_compile.triton('triton_poi_fused_add_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp8 = tmp7 + tmp3
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [elu, add], Original ATen: [aten.elu, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_elu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
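# Identity behind the fused kernel (sketch): with alpha = 1,
# elu(x) + 1 == x + 1 for x > 0 and exp(x) for x <= 0, so the output is
# strictly positive -- handy when the activation feeds a scale parameter.
def _check_elu_plus_identity():
    x = torch.linspace(-3, 3, 7)
    lhs = torch.nn.functional.elu(x) + 1.0
    rhs = torch.where(x > 0, x + 1.0, torch.exp(x))
    assert torch.allclose(lhs, rhs)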
| import torch
from torch import nn
import torch.nn
class ELUPlus(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x) + 1.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
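# Quick usage sketch: the "+ 1.0" shifts ELU's range (-1, inf) to (0, inf),
# so outputs can be consumed directly as positive scale parameters.
def _elu_plus_demo():
    m = ELUPlus()
    y = m(torch.randn(4, 4, 4, 4))
    assert (y > 0).all()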
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp8 = tmp7 + tmp3
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ELUPlusNew(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
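# Hedged equivalence check (CUDA only): the compiled path should match the
# eager nn.ELU()(x) + 1.0 up to float tolerance.
def _check_elu_plus_compiled():
    if not torch.cuda.is_available():
        return
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ELUPlusNew()(x), nn.ELU()(x) + 1.0)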
| UzTak/nflows | ELUPlus | false | 1,197 | [
"MIT"
] | 0 | 7211b129bfd60fabed199a1d2a3272b2aac8bbda | https://github.com/UzTak/nflows/tree/7211b129bfd60fabed199a1d2a3272b2aac8bbda | import torch
from torch import nn
import torch.nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x) + 1.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InteractingLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vb/cvbie2qzppig2lfiaox6fsifr4m3szei3bwub2h4jhezugadmtam.py
# Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# querys_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 32)
x0 = xindex % 8
x1 = (xindex // 8) % 4
x3 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (8 + x0 + (16*x1) + (64*((-4) + x2))), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/d7/cd7xa5d4yg5y7exr6s4sr25rd6okj4v7452l7cyhxnqr3mcd4qhj.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kx/ckxzcazhsdasvh5sdcvshdrxriufwxfrn25tt7nuks5deb2u6ei5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rg/crg347qwcf7vy2xko33empdwam2lpkatpcrrqyfrcymucxldnijc.py
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# result_4 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp11 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 16, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (128 + (8*x1) + ((-8) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = 0.0
tmp16 = tmp14 <= tmp15
tl.store(in_out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [querys], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [keys], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [values], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((8, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (8, 4, 8), (32, 8, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [keys_1], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf1, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [inner_product], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 8, 4), (32, 1, 8), 0), out=buf5)
buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 128, grid=grid(128), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf1, (8, 4, 8), (32, 8, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [values_1], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (8, 4, 8), (32, 8, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), buf8, out=buf9)
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensordot_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10)
del primals_5
buf11 = reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0); del buf10 # reuse
buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf11, buf9, buf12, 256, grid=grid(256), stream=stream0)
del buf9
return (buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf8, (8, 8, 4), (32, 1, 8), 0), reinterpret_tensor(buf3, (8, 8, 4), (32, 1, 8), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
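# What triton_poi_fused_stack_0 computes, in eager terms (sketch): the
# per-head split of a 16-wide Q/K/V projection, i.e.
# torch.stack(torch.split(t, 8, dim=2)), laid out as (head * batch, field, 8).
def _stack_reference():
    t = torch.rand(4, 4, 16)
    heads = torch.stack(torch.split(t, 8, dim=2))  # (2, 4, 4, 8)
    return heads.reshape(8, 4, 8)                  # layout consumed by the bmm calls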
| import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class InteractingLayer(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
      - **in_features**: Positive integer, dimensionality of input features.
      - **att_embedding_size**: int. The embedding size in the multi-head self-attention network.
      - **head_num**: int. The number of heads in the multi-head self-attention network.
      - **use_res**: bool. Whether to use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2,
use_res=True, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self  # no-op: likely a stripped device-transfer call (e.g. .to(device))
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
                'Unexpected inputs dimensions %d, expected to be 3 dimensions' %
len(inputs.shape))
querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size,
dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size,
dim=2))
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
self.normalized_att_scores = F.softmax(inner_product, dim=-1)
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=-1)
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
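# Shape sketch: with in_features=4 and the defaults (att_embedding_size=8,
# head_num=2), an input of (batch, fields, 4) maps to (batch, fields, 16).
def _interacting_layer_demo():
    layer = InteractingLayer(4)
    out = layer(torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 16)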
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 32
x0 = xindex % 8
x1 = xindex // 8 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp11 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 16, tl.int64)
tmp9 = tl.load(in_ptr0 + (128 + 8 * x1 + (-8 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = 0.0
tmp16 = tmp14 <= tmp15
tl.store(in_out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((8, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](buf0, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (8, 4, 8), (32, 8, 1), 0)
del buf0
triton_poi_fused_stack_0[grid(256)](buf1, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 8, 4), (32, 1,
8), 0), out=buf5)
buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf5, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(128)](buf6, buf7, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (8, 4, 8), (32, 8, 1), 0)
del buf1
triton_poi_fused_stack_0[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (8, 4, 8), (32, 8, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1),
0), buf8, out=buf9)
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf10)
del primals_5
buf11 = reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0)
del buf10
buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(256)](buf11, buf9,
buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
return buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16),
(1, 4), 0), reinterpret_tensor(buf8, (8, 8, 4), (32, 1, 8), 0
), reinterpret_tensor(buf3, (8, 8, 4), (32, 1, 8), 0), buf4
class InteractingLayerNew(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
      - **in_features**: Positive integer, dimensionality of input features.
      - **att_embedding_size**: int. The embedding size in the multi-head self-attention network.
      - **head_num**: int. The number of heads in the multi-head self-attention network.
      - **use_res**: bool. Whether to use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2,
use_res=True, seed=1024, device='cpu'):
super(InteractingLayerNew, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self  # no-op: likely a stripped device-transfer call (e.g. .to(device))
def forward(self, input_0):
primals_2 = self.W_Query
primals_3 = self.W_key
primals_4 = self.W_Value
primals_5 = self.W_Res
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Ulian7/DeepCTR | InteractingLayer | false | 1,198 | [
"Apache-2.0"
] | 0 | d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | https://github.com/Ulian7/DeepCTR/tree/d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
      - **in_features**: Positive integer, dimensionality of input features.
      - **att_embedding_size**: int. The embedding size in the multi-head self-attention network.
      - **head_num**: int. The number of heads in the multi-head self-attention network.
      - **use_res**: bool. Whether to use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2,
use_res=True, seed=1024, device='cpu'):
super().__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self  # no-op: likely a stripped device-transfer call (e.g. .to(device))
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
'Unexpected inputs dimensions %d, expect to be 3 dimensions' %
len(inputs.shape))
querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size,
dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size,
dim=2))
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
self.normalized_att_scores = F.softmax(inner_product, dim=-1)
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=-1)
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
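The einsum 'bnik,bnjk->bnij' in the eager layer above is just a batched Q @ K^T; the compiled graph lowers it to plain bmm calls after folding the (head, batch) dimensions together, which is why buf3/buf4 are shaped (8, 4, 8). A small sketch of that equivalence, with shapes assumed from the toy inputs:
import torch
q, k = torch.rand(2, 4, 4, 8), torch.rand(2, 4, 4, 8)
ein = torch.einsum('bnik,bnjk->bnij', q, k)
bmm = torch.bmm(q.reshape(8, 4, 8),
                k.reshape(8, 4, 8).transpose(1, 2)).reshape(2, 4, 4, 4)
assert torch.allclose(ein, bmm)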
CombinedTargetMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3f/c3fha6hwigp5qdkirvgzpdtvtztnza4ys4zevam5i2owamrhkdzx.py
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
# Source node to ATen node mapping:
# heatmap_gt_1 => mul_1
# heatmap_pred_1 => mul
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# mse_loss => mean, pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {})
# %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp42, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf3, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CombinedTargetMSELoss(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(
1, 1)
heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1
)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
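# Usage note (sketch): channels are consumed in (heatmap, offset_x, offset_y)
# triplets, so num_channels should be a multiple of 3; with the 4-channel toy
# inputs above, num_joints = 4 // 3 = 1 and the fourth channel is ignored.
def _combined_loss_demo():
    crit = CombinedTargetMSELoss(use_target_weight=True)
    return crit(torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4))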
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None)
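# Note on the fused kernel above: for the (4, 4) smoke-test shapes
# num_joints == 1, so a single pass computes three 4-element MSE means --
# weighted heatmap, gt-gated offset_x and gt-gated offset_y -- scales each by
# 0.5, sums them, and multiplies by loss_weight / num_joints (both 1.0 here).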
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CombinedTargetMSELossNew(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| WangXin93/mmpose | CombinedTargetMSELoss | false | 1,199 | [
"Apache-2.0"
] | 0 | 28b6e9ac2f6ed195ab27fb04da2213fc885a5994 | https://github.com/WangXin93/mmpose/tree/28b6e9ac2f6ed195ab27fb04da2213fc885a5994 | import torch
import torch.nn as nn
class Model(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(
1, 1)
        heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(
            1, 1)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
AUGRUCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/su/csuyns6mfp66oncomzo5eikxszcgvnk2yq5hnh5qodgq2yxjppwg.py
# Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# hy => add_3
# mul => mul
# mul_2 => mul_2
# mul_3 => mul_3
# new_state => tanh
# reset_gate => sigmoid
# sub => sub
# update_gate => sigmoid_1
# update_gate_1 => mul_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, %getitem_4), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %sigmoid_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %tanh), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask)
tmp19 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 * tmp5
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp20 * tmp18
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp18, xmask)
tl.store(out_ptr3 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (64, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (64, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_5, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf0, primals_2, buf1, primals_6, primals_5, buf3, buf2, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_5, primals_6, reinterpret_tensor(buf1, (64, 4), (12, 1), 8), buf2, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class AUGRUCell(nn.Module):
""" Effect of GRU with attentional update gate (AUGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, inputs, hx, att_score):
gi = F.linear(inputs, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, i_z, i_n = gi.chunk(3, 1)
h_r, h_z, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
update_gate = torch.sigmoid(i_z + h_z)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
update_gate = att_score * update_gate
hy = (1.0 - update_gate) * hx + update_gate * new_state
return hy
def get_inputs():
return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
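# Illustrative sketch appended for clarity (not part of the original record):
# AUGRU scales the learned update gate z_t by the attention score a_t,
#     h_t = (1 - a_t * z_t) * h_{t-1} + (a_t * z_t) * h_tilde_t,
# so a zero attention score leaves the hidden state untouched.
def _augru_zero_attention_keeps_state():
    torch.manual_seed(0)
    cell = AUGRUCell(input_size=4, hidden_size=4)
    nn.init.uniform_(cell.weight_ih)  # weights are allocated uninitialized
    nn.init.uniform_(cell.weight_hh)
    x, h = torch.rand(8, 4), torch.rand(8, 4)
    hy = cell(x, h, torch.zeros(8, 1))
    assert torch.allclose(hy, h)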
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp19 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 * tmp5
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp20 * tmp18
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (64, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (64, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_5, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0,
primals_2, buf1, primals_6, primals_5, buf3, buf2, buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_5, primals_6, reinterpret_tensor(buf1,
(64, 4), (12, 1), 8), buf2, buf3, buf4
class AUGRUCellNew(nn.Module):
""" Effect of GRU with attentional update gate (AUGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_3 = input_0
primals_5 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Sunmyunghan/Final_Project | AUGRUCell | false | 1,200 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
""" Effect of GRU with attentional update gate (AUGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, inputs, hx, att_score):
gi = F.linear(inputs, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, i_z, i_n = gi.chunk(3, 1)
h_r, h_z, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
update_gate = torch.sigmoid(i_z + h_z)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
update_gate = att_score * update_gate
hy = (1.0 - update_gate) * hx + update_gate * new_state
return hy
def get_inputs():
return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]
def get_init_inputs():
return [4, 4]
|
BiInteractionPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/bi/cbi2qodo47qltjis43e7m5he7aydnzfct5wg53dvjlfjgi3wz5zt.py
# Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, sub, cross_term], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub]
# Source node to ATen node mapping:
# cross_term => mul_1
# mul => mul
# square_of_sum => pow_1
# sub => sub
# sum_1 => sum_1
# sum_of_square => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.5), kwargs = {})
triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, sub, cross_term], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from sklearn.metrics import *
class BiInteractionPooling(nn.Module):
"""Bi-Interaction Layer used in Neural FM,compress the
pairwise element-wise product of features into one single vector.
Input shape
- A 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size,1,embedding_size)``.
References
- [He X, Chua T S. Neural factorization machines for sparse predictive analytics[C]//Proceedings of the 40th International ACM SIGIR conference on Research and Development in Information Retrieval. ACM, 2017: 355-364.](http://arxiv.org/abs/1708.05027)
"""
def __init__(self):
super(BiInteractionPooling, self).__init__()
def forward(self, inputs):
concated_embeds_value = inputs
square_of_sum = torch.pow(torch.sum(concated_embeds_value, dim=1,
keepdim=True), 2)
sum_of_square = torch.sum(concated_embeds_value *
concated_embeds_value, dim=1, keepdim=True)
cross_term = 0.5 * (square_of_sum - sum_of_square)
return cross_term
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
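# Illustrative check appended for clarity (not part of the original record):
# the cross term equals the sum of pairwise element-wise products,
#     0.5 * ((sum_i x_i)^2 - sum_i x_i^2) = sum_{i<j} x_i * x_j,
# which is why the layer costs O(field_size) rather than O(field_size^2).
def _bi_interaction_matches_naive_pairwise():
    torch.manual_seed(0)
    x = torch.rand(2, 5, 3)  # (batch_size, field_size, embedding_size)
    fast = BiInteractionPooling()(x).squeeze(1)
    slow = sum(x[:, i] * x[:, j] for i in range(5) for j in range(i + 1, 5))
    assert torch.allclose(fast, slow, atol=1e-06)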
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(64)](arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class BiInteractionPoolingNew(nn.Module):
"""Bi-Interaction Layer used in Neural FM,compress the
pairwise element-wise product of features into one single vector.
Input shape
- A 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size,1,embedding_size)``.
References
- [He X, Chua T S. Neural factorization machines for sparse predictive analytics[C]//Proceedings of the 40th International ACM SIGIR conference on Research and Development in Information Retrieval. ACM, 2017: 355-364.](http://arxiv.org/abs/1708.05027)
"""
def __init__(self):
super(BiInteractionPoolingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Sunmyunghan/Final_Project | BiInteractionPooling | false | 1,201 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
from sklearn.metrics import *
class Model(nn.Module):
"""Bi-Interaction Layer used in Neural FM,compress the
pairwise element-wise product of features into one single vector.
Input shape
- A 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size,1,embedding_size)``.
References
- [He X, Chua T S. Neural factorization machines for sparse predictive analytics[C]//Proceedings of the 40th International ACM SIGIR conference on Research and Development in Information Retrieval. ACM, 2017: 355-364.](http://arxiv.org/abs/1708.05027)
"""
def __init__(self):
super().__init__()
def forward(self, inputs):
concated_embeds_value = inputs
square_of_sum = torch.pow(torch.sum(concated_embeds_value, dim=1,
keepdim=True), 2)
sum_of_square = torch.sum(concated_embeds_value *
concated_embeds_value, dim=1, keepdim=True)
cross_term = 0.5 * (square_of_sum - sum_of_square)
return cross_term
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AGRUCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/by/cbypqeb7lfdlbth5j2ww7h2bluyiqc2nrbnc76btfynxntusq5wb.py
# Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# hy => add_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# new_state => tanh
# reset_gate => sigmoid
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %view), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %tanh), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_4), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask)
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x2), xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp10 = tmp8 * tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp6 * tmp17
tmp19 = tmp10 + tmp18
tmp20 = tmp17 * tmp17
tmp21 = tmp7 - tmp20
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp19, xmask)
tl.store(out_ptr2 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
return (buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class AGRUCell(nn.Module):
""" Attention based GRU (AGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, inputs, hx, att_score):
gi = F.linear(inputs, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, _, i_n = gi.chunk(3, 1)
h_r, _, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
hy = (1.0 - att_score) * hx + att_score * new_state
return hy
def get_inputs():
return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
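# Illustrative sketch appended for clarity (not part of the original record):
# unlike AUGRU, AGRU drops the learned update gate and uses the attention
# score directly,
#     h_t = (1 - a_t) * h_{t-1} + a_t * h_tilde_t,
# so a_t = 1 replaces the state with the tanh candidate, bounding hy in
# [-1, 1] no matter how large the previous hidden state was.
def _agru_full_attention_returns_candidate():
    torch.manual_seed(0)
    cell = AGRUCell(input_size=4, hidden_size=4)
    nn.init.uniform_(cell.weight_ih)  # weights are allocated uninitialized
    nn.init.uniform_(cell.weight_hh)
    x, h = torch.rand(8, 4), 10.0 + torch.rand(8, 4)
    hy = cell(x, h, torch.ones(8, 1))
    assert torch.all(hy.abs() <= 1.0)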
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x2, xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp10 = tmp8 * tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp6 * tmp17
tmp19 = tmp10 + tmp18
tmp20 = tmp17 * tmp17
tmp21 = tmp7 - tmp20
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp19, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
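# Note on the kernel above: besides the reset gate (out_ptr0) and hy
# (out_ptr1), out_ptr2 stores 1 - tanh(new_state)**2, i.e. the
# aten.tanh_backward term saved so autograd can reuse it in the backward pass.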
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)](
buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1,
(16, 4), (12, 1), 8), buf2, buf4
class AGRUCellNew(nn.Module):
""" Attention based GRU (AGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_5 = self.bias_hh
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Sunmyunghan/Final_Project | AGRUCell | false | 1,202 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
""" Attention based GRU (AGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, inputs, hx, att_score):
gi = F.linear(inputs, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, _, i_n = gi.chunk(3, 1)
h_r, _, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
hy = (1.0 - att_score) * hx + att_score * new_state
return hy
def get_inputs():
return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
cha_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/b3/cb3k2n2etvob7szydtuektkv7vvmqqhpd7b5usq3fmxyrvk5c5qr.py
# Topologically Sorted Source Nodes: [sub, abs_1, pow_1, diff, out, loss], Original ATen: [aten.sub, aten.abs, aten.pow, aten.add, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# diff => add
# loss => mean
# out => sqrt
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_abs_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_abs_add_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, pow_1, diff, out, loss], Original ATen: [aten.sub, aten.abs, aten.pow, aten.add, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mean_pow_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
import torch.cuda
class cha_loss(nn.Module):
def __init__(self, eps=0.001):
super(cha_loss, self).__init__()
self.eps = eps
def forward(self, inp, target):
diff = torch.abs(inp - target) ** 2 + self.eps ** 2
out = torch.sqrt(diff)
loss = torch.mean(out)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
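# Illustrative sketch appended for clarity (not part of the original record):
# this is the Charbonnier loss, sqrt(diff**2 + eps**2), a smooth surrogate
# for L1 that stays differentiable at zero; for |diff| >> eps the two agree.
def _charbonnier_tracks_l1():
    torch.manual_seed(0)
    inp, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    cha = cha_loss(eps=0.001)(inp, target)
    l1 = torch.mean(torch.abs(inp - target))
    assert torch.allclose(cha, l1, atol=0.001)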
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_pow_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class cha_lossNew(nn.Module):
def __init__(self, eps=0.001):
super(cha_lossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Weepingchestnut/OVSR | cha_loss | false | 1,203 | [
"Apache-2.0"
] | 0 | 11554a3b1072d50a8c88cf59b4b986df1fda73f9 | https://github.com/Weepingchestnut/OVSR/tree/11554a3b1072d50a8c88cf59b4b986df1fda73f9 | import torch
import torch.nn as nn
import torch.optim
import torch.cuda
class Model(nn.Module):
def __init__(self, eps=0.001):
super().__init__()
self.eps = eps
def forward(self, inp, target):
diff = torch.abs(inp - target) ** 2 + self.eps ** 2
out = torch.sqrt(diff)
loss = torch.mean(out)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AvgReducePool1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jn/cjnv5uptstyk4xaisuiw5kf5lbz3m33meejxhbfbsta5ozps7ijn.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class AvgReducePool1d(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgReducePool1dNew(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
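# --- Hedged usage sketch (editor addition, not part of the original record).
# Assumes a CUDA device. The kernel's indexing is specialized to (4, 4, 4, 4)
# inputs, so the check uses exactly that shape and compares against the eager
# torch.mean over dim=2.
def _check_avg_reduce_pool1d_fused():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(AvgReducePool1dNew()(x), torch.mean(x, dim=2))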
| WangXinglin/BIT_framework | AvgReducePool1d | false | 1,204 | [
"MIT"
] | 0 | 1484874fcd00d052c7536789dec95050b480b25d | https://github.com/WangXinglin/BIT_framework/tree/1484874fcd00d052c7536789dec95050b480b25d | import torch
from torch import nn
class Model(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
T5LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zc/czcy4atdkmj5kmy4jxz74ygrnoedga35xsqkibq72stpidzlybko.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, x, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# pow_1 => pow_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, x, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class T5LayerNorm(nn.Module):
""" Custom LayerNorm for T5 with no mean subtraction and no bias.
"""
def __init__(self, input_size: 'int', eps: 'float'=1e-05):
super().__init__()
self.w = nn.Parameter(torch.ones(input_size))
self.eps = eps
def forward(self, x: 'torch.Tensor'):
x = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
return self.w * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class T5LayerNormNew(nn.Module):
""" Custom LayerNorm for T5 with no mean subtraction and no bias.
"""
def __init__(self, input_size: 'int', eps: 'float'=1e-05):
super().__init__()
self.w = nn.Parameter(torch.ones(input_size))
self.eps = eps
def forward(self, input_0):
primals_2 = self.w
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
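# --- Hedged equivalence sketch (editor addition, not part of the original
# record). Assumes a CUDA device; the helper name is illustrative. The fused
# kernel should reproduce the eager RMS-style normalization
# w * x / sqrt(mean(x^2, -1) + eps) over the last dimension.
def _check_t5_layer_norm_fused():
    m = T5LayerNormNew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.w * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + m.eps))
    assert torch.allclose(m(x), ref, atol=1e-06)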
| WangXinglin/BIT_framework | T5LayerNorm | false | 1,205 | [
"MIT"
] | 0 | 1484874fcd00d052c7536789dec95050b480b25d | https://github.com/WangXinglin/BIT_framework/tree/1484874fcd00d052c7536789dec95050b480b25d | import torch
from torch import nn
class Model(nn.Module):
""" Custom LayerNorm for T5 with no mean subtraction and no bias.
"""
def __init__(self, input_size: 'int', eps: 'float'=1e-05):
super().__init__()
self.w = nn.Parameter(torch.ones(input_size))
self.eps = eps
def forward(self, x: 'torch.Tensor'):
x = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
return self.w * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
GraphConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/q5/cq5wusjiorttrifgkbgmb575ri5bohmulexkpd7lpcdrnw7myr2f.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%bmm,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1)
del buf0
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
return (buf2, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn.modules.loss
import torch.utils.data
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = self.linear(input)
output = torch.bmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn.modules.loss
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4),
            (16, 4, 1), 0), out=buf1)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
    return (buf2, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3,
        reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0))
class GraphConvolutionNew(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolutionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_2 = self.linear.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
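# --- Hedged equivalence sketch (editor addition, not part of the original
# record). Assumes a CUDA device; eval() keeps the comparison deterministic
# (the compiled graph traced dropout away). The compiled forward should match
# relu(bmm(adj, linear(x))) computed eagerly with the module's own weight.
def _check_graph_convolution_fused():
    m = GraphConvolutionNew(4, 4).cuda().eval()
    x = torch.rand(4, 4, 4, device='cuda')
    adj = torch.rand(4, 4, 4, device='cuda')
    ref = torch.relu(torch.bmm(adj, m.linear(x)))
    assert torch.allclose(m(x, adj), ref, atol=1e-06)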
| WanyuGroup/CVPR2022-OrphicX | GraphConvolution | false | 1,206 | [
"MIT"
] | 0 | 98d8d8259439c45661573e575cf956331df16abc | https://github.com/WanyuGroup/CVPR2022-OrphicX/tree/98d8d8259439c45661573e575cf956331df16abc | from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn.modules.loss
import torch.utils.data
class Model(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = self.linear(input)
output = torch.bmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/74/c74cgsnsrc4jmxl2qqe5j2mmuphscf6e56iqf2iy24pro6mj73sp.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
class Normalize(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(Normalize, self).__init__()
self.p = p
self.dim = dim
def forward(self, x):
return F.normalize(x, self.p, self.dim, eps=1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(NormalizeNew, self).__init__()
self.p = p
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
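# --- Hedged usage sketch (editor addition, not part of the original record).
# Assumes a CUDA device. Compares the kernel against the same L2 normalization
# computed eagerly over dim=1 with the eps=1e-08 floor used in the kernel.
def _check_normalize_fused():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = x / x.norm(p=2, dim=1, keepdim=True).clamp_min(1e-08)
    assert torch.allclose(NormalizeNew()(x), ref, atol=1e-06)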
| WillyChen123/CDFNet | Normalize | false | 1,207 | [
"MIT"
] | 0 | 12d6b288aa2a8301683395a75bd44a7be44b7f2a | https://github.com/WillyChen123/CDFNet/tree/12d6b288aa2a8301683395a75bd44a7be44b7f2a | import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super().__init__()
self.p = p
self.dim = dim
def forward(self, x):
return F.normalize(x, self.p, self.dim, eps=1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InnerProductDecoder | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/hn/chnsjoso43zxwc4rs5k2ujcpccyhrverh5okecwh4vbrmpmq4kaa.py
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# adj => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%bmm,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, 64, grid=grid(64), stream=stream0)
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4),
            (16, 1, 4), 0), out=buf0)
del arg0_1
buf1 = buf0
del buf0
get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(64)](buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
return buf1,
class InnerProductDecoderNew(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoderNew, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
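# --- Hedged usage sketch (editor addition, not part of the original record).
# Assumes a CUDA device. Dropout was traced away in the compiled graph, so
# the output should match sigmoid(bmm(z, z^T)) computed eagerly per batch.
def _check_inner_product_decoder_fused():
    z = torch.rand(4, 4, 4, device='cuda')
    dec = InnerProductDecoderNew(dropout=0.5)
    ref = torch.sigmoid(torch.bmm(z, z.transpose(1, 2)))
    assert torch.allclose(dec(z), ref, atol=1e-06)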
| WanyuGroup/CVPR2022-OrphicX | InnerProductDecoder | false | 1,208 | [
"MIT"
] | 0 | 98d8d8259439c45661573e575cf956331df16abc | https://github.com/WanyuGroup/CVPR2022-OrphicX/tree/98d8d8259439c45661573e575cf956331df16abc | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class Model(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super().__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [0.5]
|
InnerProductDecoderMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/by/cbyavqfpm6cctsjn76scubidnajec26whr355vus7lq6jaaa5rcx.py
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# z => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6n/c6ny6zeq6oiux67xx5maj7x7dofqjbxxtczkhwvlqya6ihwdcyou.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# z_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2k/c2kxppdwrjhrnsxdvspyvbppardk6v3troxohx47zqnk2haa3fjg.py
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# adj => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%bmm,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf5, 64, grid=grid(64), stream=stream0)
return (buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf3, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class InnerProductDecoderMLP(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, input_dim, hidden_dim1, hidden_dim2, dropout, act=
torch.sigmoid):
super(InnerProductDecoderMLP, self).__init__()
self.fc = nn.Linear(input_dim, hidden_dim1)
self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)
self.dropout = dropout
self.act = act
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.fc.weight)
torch.nn.init.zeros_(self.fc.bias)
torch.nn.init.xavier_uniform_(self.fc2.weight)
torch.nn.init.zeros_(self.fc2.bias)
def forward(self, z):
z = F.relu(self.fc(z))
z = torch.sigmoid(self.fc2(z))
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4,
'dropout': 0.5}]
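def _example_usage():
    # Hedged usage sketch (not from the original source; the helper name and
    # dimensions are assumptions mirroring get_init_inputs/get_inputs above).
    dec = InnerProductDecoderMLP(input_dim=4, hidden_dim1=4, hidden_dim2=4,
        dropout=0.5)
    z = torch.rand(4, 4, 4)  # (batch, num_nodes, input_dim) latent codes
    adj = dec(z)  # (batch, num_nodes, num_nodes) soft adjacency in (0, 1)
    return adj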
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
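    # Added note: fuses the linear layer's bias add with ReLU in place, and
    # also stores the (activation <= 0) mask that the autograd
    # threshold_backward pass consumes.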
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_sigmoid_2[grid(64)](buf5, 64, XBLOCK=64, num_warps
=1, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(buf1, (16, 4), (4, 1), 0
), buf3, buf5, primals_4, buf6
class InnerProductDecoderMLPNew(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, input_dim, hidden_dim1, hidden_dim2, dropout, act=
torch.sigmoid):
super(InnerProductDecoderMLPNew, self).__init__()
self.fc = nn.Linear(input_dim, hidden_dim1)
self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)
self.dropout = dropout
self.act = act
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.fc.weight)
torch.nn.init.zeros_(self.fc.bias)
torch.nn.init.xavier_uniform_(self.fc2.weight)
torch.nn.init.zeros_(self.fc2.bias)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| WanyuGroup/CVPR2022-OrphicX | InnerProductDecoderMLP | false | 1,209 | [
"MIT"
] | 0 | 98d8d8259439c45661573e575cf956331df16abc | https://github.com/WanyuGroup/CVPR2022-OrphicX/tree/98d8d8259439c45661573e575cf956331df16abc | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class Model(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, input_dim, hidden_dim1, hidden_dim2, dropout, act=
torch.sigmoid):
super().__init__()
self.fc = nn.Linear(input_dim, hidden_dim1)
self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)
self.dropout = dropout
self.act = act
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.fc.weight)
torch.nn.init.zeros_(self.fc.bias)
torch.nn.init.xavier_uniform_(self.fc2.weight)
torch.nn.init.zeros_(self.fc2.bias)
def forward(self, z):
z = F.relu(self.fc(z))
z = torch.sigmoid(self.fc2(z))
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4,
'dropout': 0.5}]
|
Accuracy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jd/cjdnaobowqernajenb4axacuvfjpuu3bnchwmpvbem4jevqy6y4v.py
# Topologically Sorted Source Nodes: [predictions], Original ATen: [aten.argmax]
# Source node to ATen node mapping:
# predictions => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, -1), kwargs = {})
triton_poi_fused_argmax_0 = async_compile.triton('triton_poi_fused_argmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vd/cvdkoxf2wkd5oryi23fngtr24eavrkiw46qraujejvcpyvdhsfen.py
# Topologically Sorted Source Nodes: [eq, valid_mask, correct, sum_1, float_2, sum_2, float_3, truediv], Original ATen: [aten.eq, aten.ne, aten.mul, aten.sum, aten._to_copy, aten.div]
# Source node to ATen node mapping:
# correct => mul
# eq => eq
# float_2 => convert_element_type
# float_3 => convert_element_type_1
# sum_1 => sum_1
# sum_2 => sum_2
# truediv => div
# valid_mask => ne
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%argmax, %arg1_1), kwargs = {})
# %ne : [num_users=2] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, -100), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%eq, %ne), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne,), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convert_element_type, %convert_element_type_1), kwargs = {})
triton_per_fused__to_copy_div_eq_mul_ne_sum_1 = async_compile.triton('triton_per_fused__to_copy_div_eq_mul_ne_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_eq_mul_ne_sum_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_eq_mul_ne_sum_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (r2), None)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp1 == tmp2
tmp4 = -100.0
tmp5 = tmp2 != tmp4
tmp6 = tmp3 & tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tmp5.to(tl.int64)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp10.to(tl.float32)
tmp16 = tmp14.to(tl.float32)
tmp17 = tmp15 / tmp16
tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [predictions], Original ATen: [aten.argmax]
stream0 = get_raw_stream(0)
triton_poi_fused_argmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [eq, valid_mask, correct, sum_1, float_2, sum_2, float_3, truediv], Original ATen: [aten.eq, aten.ne, aten.mul, aten.sum, aten._to_copy, aten.div]
triton_per_fused__to_copy_div_eq_mul_ne_sum_1.run(buf0, arg1_1, buf3, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from sklearn.metrics import *
import torch.nn as nn
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
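def _accuracy_example():
    # Hedged illustration (the helper name and values are invented for this
    # sketch): the -100 label is masked out by ignore_index, so the result
    # is 2 correct / 2 valid positions.
    logits = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])
    labels = torch.tensor([0, 1, -100])
    return accuracy(logits, labels)  # tensor(1.)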
class Accuracy(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, target):
return accuracy(inputs, target, self.ignore_index)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from sklearn.metrics import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
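    # Added note: the comparisons below run a NaN-aware "tournament" argmax
    # over the 4 class logits of each position; `t != t` is true only for
    # NaN, and ties keep the lower index.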
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_per_fused__to_copy_div_eq_mul_ne_sum_1(in_ptr0, in_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + r2, None)
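    # Added note: in_ptr0 holds the (4, 4, 4) int64 predictions, broadcast
    # against the (4, 4, 4, 4) labels via r0 = rindex % 64; the two sums
    # below count correct-and-valid vs. valid elements, and their ratio is
    # the masked accuracy.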
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp1 == tmp2
tmp4 = -100.0
tmp5 = tmp2 != tmp4
tmp6 = tmp3 & tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tmp5.to(tl.int64)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp10.to(tl.float32)
tmp16 = tmp14.to(tl.float32)
tmp17 = tmp15 / tmp16
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_div_eq_mul_ne_sum_1[grid(1)](buf0, arg1_1,
buf3, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class AccuracyNew(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Vasyka/DeepGQuad | Accuracy | false | 1,210 | [
"Apache-2.0"
] | 0 | 772a461732fc4044a1dee84d2688bf16960e272c | https://github.com/Vasyka/DeepGQuad/tree/772a461732fc4044a1dee84d2688bf16960e272c | import torch
from sklearn.metrics import *
import torch.nn as nn
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class Model(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, target):
return accuracy(inputs, target, self.ignore_index)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InteractingLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ak/caklf2pyfxcrlk24jikpqfulearftlggift3k32sdo56rqpygtef.py
# Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# querys_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 8)
x0 = xindex % 2
x1 = (xindex // 2) % 4
x3 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*((-4) + x2))), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/d7/cd7xa5d4yg5y7exr6s4sr25rd6okj4v7452l7cyhxnqr3mcd4qhj.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kx/ckxzcazhsdasvh5sdcvshdrxriufwxfrn25tt7nuks5deb2u6ei5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ee/ceebt7dp2dnq4qecrxhaoiakzivwsb6pug54s5t7st6c2qpbsii7.py
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# result_4 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp11 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (32 + (2*x1) + ((-2) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = 0.0
tmp16 = tmp14 <= tmp15
tl.store(in_out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [querys], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [keys], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [values], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((8, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, buf3, 64, grid=grid(64), stream=stream0)
buf4 = reinterpret_tensor(buf0, (8, 4, 2), (8, 2, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [keys_1], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf1, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [inner_product], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 2, 4), (8, 1, 2), 0), out=buf5)
buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 128, grid=grid(128), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf1, (8, 4, 2), (8, 2, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [values_1], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf2, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf2, (8, 4, 2), (8, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), buf8, out=buf9)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensordot_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10)
del primals_5
buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf11, buf9, buf12, 64, grid=grid(64), stream=stream0)
del buf9
return (buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf8, (8, 2, 4), (8, 1, 2), 0), reinterpret_tensor(buf3, (8, 2, 4), (8, 1, 2), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class InteractingLayer(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
        - **head_num**: int. The number of heads in the multi-head self-attention network.
        - **use_res**: bool. Whether or not to use standard residual connections before the output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, embedding_size, head_num=2, use_res=True, scaling=
False, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
if embedding_size % head_num != 0:
raise ValueError(
'embedding_size is not an integer multiple of head_num!')
self.att_embedding_size = embedding_size // head_num
self.head_num = head_num
self.use_res = use_res
self.scaling = scaling
self.seed = seed
self.W_Query = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
'Unexpected inputs dimensions %d, expect to be 3 dimensions' %
len(inputs.shape))
querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size,
dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size,
dim=2))
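        # querys/keys/values are now (head_num, batch, field_size, att_embedding_size)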
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
if self.scaling:
inner_product /= self.att_embedding_size ** 0.5
self.normalized_att_scores = F.softmax(inner_product, dim=-1)
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=-1)
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_size': 4}]
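def _interacting_layer_example():
    # Hedged sketch (the helper name is invented; shapes follow get_inputs
    # above): a single self-attention pass over 4 fields with 4-dim
    # embeddings split across 2 heads.
    layer = InteractingLayer(embedding_size=4, head_num=2)
    out = layer(torch.rand(4, 4, 4))  # (batch, field_size, embedding_size)
    return out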
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
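    # Added note: materializes torch.stack(torch.split(x, 2, dim=2)) -- the
    # first 4 output batch slices read input columns 0:2 (head 0), the last
    # 4 read columns 2:4 (head 1).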
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 8
x0 = xindex % 2
x1 = xindex // 2 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
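    # Added note: first half of a numerically stable softmax -- subtract the
    # per-row max before exponentiating; triton_poi_fused__softmax_2 then
    # normalizes by the row sum.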
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp11 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr0 + (32 + 2 * x1 + (-2 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = 0.0
tmp16 = tmp14 <= tmp15
tl.store(in_out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((8, 4, 2), (8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (8, 4, 2), (8, 2, 1), 0)
del buf0
triton_poi_fused_stack_0[grid(64)](buf1, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 2, 4), (8, 1,
2), 0), out=buf5)
buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf5, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(128)](buf6, buf7, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (8, 4, 2), (8, 2, 1), 0)
del buf1
triton_poi_fused_stack_0[grid(64)](buf2, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf2, (8, 4, 2), (8, 2, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1),
0), buf8, out=buf9)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf10)
del primals_5
buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11, buf9,
buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf9
return buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16),
(1, 4), 0), reinterpret_tensor(buf8, (8, 2, 4), (8, 1, 2), 0
), reinterpret_tensor(buf3, (8, 2, 4), (8, 1, 2), 0), buf4
class InteractingLayerNew(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
        - **head_num**: int. The number of heads in the multi-head self-attention network.
        - **use_res**: bool. Whether or not to use standard residual connections before the output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, embedding_size, head_num=2, use_res=True, scaling=
False, seed=1024, device='cpu'):
super(InteractingLayerNew, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
if embedding_size % head_num != 0:
raise ValueError(
'embedding_size is not an integer multiple of head_num!')
self.att_embedding_size = embedding_size // head_num
self.head_num = head_num
self.use_res = use_res
self.scaling = scaling
self.seed = seed
self.W_Query = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self.to(device)
def forward(self, input_0):
primals_2 = self.W_Query
primals_3 = self.W_key
primals_4 = self.W_Value
primals_5 = self.W_Res
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sunmyunghan/Final_Project | InteractingLayer | false | 1,211 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class Model(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,embedding_size)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
- **head_num**: int.The head number in multi-head self-attention network.
- **use_res**: bool.Whether or not use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, embedding_size, head_num=2, use_res=True, scaling=
False, seed=1024, device='cpu'):
super().__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
if embedding_size % head_num != 0:
raise ValueError(
'embedding_size is not an integer multiple of head_num!')
self.att_embedding_size = embedding_size // head_num
self.head_num = head_num
self.use_res = use_res
self.scaling = scaling
self.seed = seed
self.W_Query = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size,
embedding_size))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
        self.to(device)
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
'Unexpected inputs dimensions %d, expect to be 3 dimensions' %
len(inputs.shape))
querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size,
dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size,
dim=2))
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
if self.scaling:
inner_product /= self.att_embedding_size ** 0.5
self.normalized_att_scores = F.softmax(inner_product, dim=-1)
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=-1)
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
SoftDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/wq/cwq5uygy7kcq5ujfxxfxcjhwrohlqk4d23fuusip7buftbecgoz2.py
# Topologically Sorted Source Nodes: [mul, numerator, mul_1, mul_2, add, denominator, truediv], Original ATen: [aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# denominator => sum_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# numerator => sum_1
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_div_mul_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tmp1 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tmp5 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, numerator, mul_1, mul_2, add, denominator, truediv], Original ATen: [aten.mul, aten.sum, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class SoftDiceLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(SoftDiceLoss, self).__init__(size_average, reduce, reduction)
def forward(self, y_pred, y_gt):
numerator = torch.sum(y_pred * y_gt)
denominator = torch.sum(y_pred * y_pred + y_gt * y_gt)
return numerator / denominator
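def example_soft_dice():
    # Hedged usage sketch (not in the original source): evaluates the soft
    # Dice ratio sum(p*g) / sum(p*p + g*g) on random tensors. Note the module
    # returns the raw ratio with no smoothing term, so an all-zero input pair
    # would divide by zero.
    loss_fn = SoftDiceLoss()
    return loss_fn(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))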
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
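# Reviewer note (not original): this persistent-reduction kernel fuses the
# whole SoftDice forward into a single pass over the 256 elements, reducing
# sum(a*b) and sum(a*a + b*b) together and storing their ratio as a scalar.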
@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tmp1 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tmp5 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class SoftDiceLossNew(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(SoftDiceLossNew, self).__init__(size_average, reduce, reduction)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Will3577/Medical-Transformer | SoftDiceLoss | false | 1,212 | [
"MIT"
] | 0 | e72bfe68fcd55268f57bc7c27b4cbce8029d1b81 | https://github.com/Will3577/Medical-Transformer/tree/e72bfe68fcd55268f57bc7c27b4cbce8029d1b81 | import torch
from torch.nn.modules.loss import _Loss
class Model(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super().__init__(size_average, reduce, reduction)
def forward(self, y_pred, y_gt):
numerator = torch.sum(y_pred * y_gt)
denominator = torch.sum(y_pred * y_pred + y_gt * y_gt)
return numerator / denominator
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
VGAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/q5/cq5wusjiorttrifgkbgmb575ri5bohmulexkpd7lpcdrnw7myr2f.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%bmm,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1)
buf2 = buf1; del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf8, 64, grid=grid(64), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [support_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), out=buf7)
return (buf7, buf4, buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), primals_5, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = self.linear(input)
output = torch.bmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
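# Reviewer note (not original): with the default act=torch.sigmoid this
# reconstructs edge probabilities as sigmoid(Z @ Z^T) per batch element;
# VGAE below passes an identity act, so it returns raw inner products.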
class VGAE(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, output_dim, dropout):
super(VGAE, self).__init__()
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout,
act=F.relu)
self.gc2 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.gc3 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.dc = InnerProductDecoder(dropout, act=lambda x: x)
def encode(self, x, adj):
hidden1 = self.gc1(x, adj)
return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
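    # Reviewer note (not original): this samples z = mu + eps * std with
    # eps ~ N(0, I); note std = exp(logvar) here, whereas the common VAE
    # convention reads logvar as log(sigma^2) and uses exp(0.5 * logvar).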
def forward(self, x, adj):
mu, logvar = self.encode(x, adj)
z = self.reparameterize(mu, logvar)
return self.dc(z), mu, logvar
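def example_vgae():
    # Hedged usage sketch (not in the original source): runs the VGAE on a
    # batch of 4 graphs with 4 nodes and 4-dim features, using dense
    # adjacency tensors of shape (batch, nodes, nodes).
    model = VGAE(input_feat_dim=4, hidden_dim1=4, output_dim=4, dropout=0.5)
    x, adj = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    adj_recon, mu, logvar = model(x, adj)
    return adj_recon.shape, mu.shape, logvar.shape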
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_feat_dim': 4, 'hidden_dim1': 4, 'output_dim': 4,
'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
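# Reviewer note (not original): this elementwise kernel applies ReLU in place
# (max(x, 0)) and also emits a boolean mask of non-positive activations,
# which the threshold_backward pass reuses to zero the gradient.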
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (
16, 4, 1), 0), out=buf1)
buf2 = buf1
del buf1
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_3, reinterpret_tensor(buf3, (4, 4, 4), (
16, 4, 1), 0), out=buf4)
buf5 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_3, reinterpret_tensor(buf5, (4, 4, 4), (
16, 4, 1), 0), out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(buf4, reinterpret_tensor(buf4, (4, 4, 4), (16, 1,
4), 0), out=buf7)
return buf7, buf4, buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(
buf4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_3, (4,
4, 4), (16, 1, 4), 0), primals_5, primals_4, buf8
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = self.linear(input)
output = torch.bmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
class VGAENew(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, output_dim, dropout):
super(VGAENew, self).__init__()
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout,
act=F.relu)
self.gc2 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.gc3 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.dc = InnerProductDecoder(dropout, act=lambda x: x)
def encode(self, x, adj):
hidden1 = self.gc1(x, adj)
return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, input_0, input_1):
primals_2 = self.gc1.linear.weight
primals_4 = self.gc2.linear.weight
primals_5 = self.gc3.linear.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| WanyuGroup/CVPR2022-OrphicX | VGAE | false | 1,213 | [
"MIT"
] | 0 | 98d8d8259439c45661573e575cf956331df16abc | https://github.com/WanyuGroup/CVPR2022-OrphicX/tree/98d8d8259439c45661573e575cf956331df16abc | from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.modules.loss
import torch.utils.data
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.linear = torch.nn.Linear(in_features, out_features, bias=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = self.linear(input)
output = torch.bmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super().__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.bmm(z, torch.transpose(z, 1, 2)))
return adj
class Model(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, output_dim, dropout):
super().__init__()
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout,
act=F.relu)
self.gc2 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.gc3 = GraphConvolution(hidden_dim1, output_dim, dropout, act=
lambda x: x)
self.dc = InnerProductDecoder(dropout, act=lambda x: x)
def encode(self, x, adj):
hidden1 = self.gc1(x, adj)
return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, x, adj):
mu, logvar = self.encode(x, adj)
z = self.reparameterize(mu, logvar)
return self.dc(z), mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_feat_dim': 4, 'hidden_dim1': 4, 'output_dim': 4,
'dropout': 0.5}]
|
SeparableConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/4r/c4rfdh73mumvv5zisbd6bltsx5zw6wf6wdu6mpspnsxohocpvo6k.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# out => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%unsqueeze, [0, 1]), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_2, buf0, 20, grid=grid(20), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
return (reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_1, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SeparableConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding='same', bias=False):
super(SeparableConv1d, self).__init__()
if stride > 1:
padding = 0
self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size=
kernel_size, groups=in_channels, bias=bias, padding=padding,
stride=stride)
self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1,
bias=bias)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
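def example_separable_conv():
    # Hedged usage sketch (not in the original source): a depthwise conv
    # (groups=in_channels) plus a 1x1 pointwise conv needs roughly
    # in*k + in*out weights instead of in*out*k for a dense Conv1d.
    conv = SeparableConv1d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(1, 4, 16)  # (batch, channels, length)
    return conv(x).shape  # padding='same' keeps the length at 16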
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
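# Reviewer note (not original): this kernel right-pads each length-4 row with
# one zero (to length 5); together with the symmetric padding=(1,) of the
# depthwise convolution below, it realizes the asymmetric total padding that
# Conv1d's padding='same' needs for an even kernel size.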
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(20)](primals_2, buf0, 20,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0
), primals_1, primals_3, buf0, buf1
class SeparableConv1dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding='same', bias=False):
super(SeparableConv1dNew, self).__init__()
if stride > 1:
padding = 0
self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size=
kernel_size, groups=in_channels, bias=bias, padding=padding,
stride=stride)
self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1,
bias=bias)
def forward(self, input_0):
primals_1 = self.depthwise.weight
primals_3 = self.pointwise.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| WhiteTeaDragon/hw-asr | SeparableConv1d | false | 1,214 | [
"MIT"
] | 0 | 78a767ab00a743b8d28d1fdad795f066fc0795da | https://github.com/WhiteTeaDragon/hw-asr/tree/78a767ab00a743b8d28d1fdad795f066fc0795da | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding='same', bias=False):
super().__init__()
if stride > 1:
padding = 0
self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size=
kernel_size, groups=in_channels, bias=bias, padding=padding,
stride=stride)
self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1,
bias=bias)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
GetMask | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vu/cvun6axbfiubqnkc625m5gpbykpzyfuvupvvh473te24qlxsmxk4.py
# Topologically Sorted Source Nodes: [ne, mask], Original ATen: [aten.ne, aten._to_copy]
# Source node to ATen node mapping:
# mask => convert_element_type
# ne => ne
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
triton_poi_fused__to_copy_ne_0 = async_compile.triton('triton_poi_fused__to_copy_ne_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ne, mask], Original ATen: [aten.ne, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_ne_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.multiprocessing
import torch.utils.data
class GetMask(torch.nn.Module):
"""
inputs: x: any size
    outputs: mask: same size as input x
"""
def __init__(self, pad_idx=0):
super(GetMask, self).__init__()
self.pad_idx = pad_idx
def forward(self, x):
mask = torch.ne(x, self.pad_idx).float()
return mask
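def example_get_mask():
    # Hedged usage sketch (not in the original source): positions equal to
    # pad_idx (0 by default) become 0.0, all other positions become 1.0.
    ids = torch.tensor([[5, 3, 0, 0], [7, 0, 0, 0]])
    return GetMask()(ids)  # tensor([[1., 1., 0., 0.], [1., 0., 0., 0.]])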
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.multiprocessing
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_ne_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GetMaskNew(torch.nn.Module):
"""
inputs: x: any size
    outputs: mask: same size as input x
"""
def __init__(self, pad_idx=0):
super(GetMaskNew, self).__init__()
self.pad_idx = pad_idx
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| WuDiDaBinGe/TAKG | GetMask | false | 1,215 | [
"MIT"
] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
import torch.multiprocessing
import torch.utils.data
class Model(torch.nn.Module):
"""
inputs: x: any size
    outputs: mask: same size as input x
"""
def __init__(self, pad_idx=0):
super().__init__()
self.pad_idx = pad_idx
def forward(self, x):
mask = torch.ne(x, self.pad_idx).float()
return mask
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SmoothCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# lsm => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xz/cxzrhhdsigxzd7l33hed2jl7lxvtwhqe4lxz7hmqo775cqjakfzl.py
# Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean]
# Source node to ATen node mapping:
# loss => neg
# loss_1 => mean
# lsm => exp, log, sub_1, sum_1
# mul => mul
# sum_1 => sum_2
# targets => scatter_upon_const_tensor
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 0.0, dtype: torch.float32, dim: 1, selector: %unsqueeze, val: 1.0})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_upon_const_tensor, %sub_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (4*r2), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp6 - tmp17
tmp19 = tmp5 * tmp18
tmp20 = tl.full([1, 1], 1, tl.int64)
tmp21 = tmp0 == tmp20
tmp22 = tl.where(tmp21, tmp3, tmp4)
tmp23 = tmp8 - tmp17
tmp24 = tmp22 * tmp23
tmp25 = tmp19 + tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp29 = tmp11 - tmp17
tmp30 = tmp28 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = tl.full([1, 1], 3, tl.int64)
tmp33 = tmp0 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp4)
tmp35 = tmp14 - tmp17
tmp36 = tmp34 * tmp35
tmp37 = tmp31 + tmp36
tmp38 = -tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean]
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1.run(buf3, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
import torch.utils.tensorboard
class SmoothCrossEntropyLoss(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int',
smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes), device
=targets.device).fill_(smoothing / (n_classes - 1)).scatter_(
1, targets.data.unsqueeze(1), 1.0 - smoothing)
return targets
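    # Worked example (reviewer note, not original): with n_classes=4 and
    # smoothing=0.1, every entry is filled with 0.1/3 ≈ 0.0333 and the target
    # column is overwritten with 0.9, so each row still sums to 1.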
def forward(self, inputs, targets):
targets = SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.
size(-1), self.smoothing)
lsm = F.log_softmax(inputs, -1)
if self.weight is not None:
lsm = lsm * self.weight.unsqueeze(0)
loss = -(targets * lsm).sum(-1)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
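def example_smooth_ce():
    # Hedged usage sketch (not in the original source): label smoothing of
    # 0.1 over 4 classes; logits are (batch, n_classes), targets are class ids.
    loss_fn = SmoothCrossEntropyLoss(smoothing=0.1)
    logits, targets = torch.randn(8, 4), torch.randint(0, 4, (8,))
    return loss_fn(logits, targets)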
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _WeightedLoss
import torch.utils.tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
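# Reviewer note (not original): this reduction kernel builds the one-hot
# targets by index-equality tests instead of a scatter, finishes log_softmax
# with a log-sum-exp over the 4 classes, forms -(targets * lsm).sum(-1), and
# averages the result over the 64 rows.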
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp6 - tmp17
tmp19 = tmp5 * tmp18
tmp20 = tl.full([1, 1], 1, tl.int64)
tmp21 = tmp0 == tmp20
tmp22 = tl.where(tmp21, tmp3, tmp4)
tmp23 = tmp8 - tmp17
tmp24 = tmp22 * tmp23
tmp25 = tmp19 + tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp29 = tmp11 - tmp17
tmp30 = tmp28 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = tl.full([1, 1], 3, tl.int64)
tmp33 = tmp0 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp4)
tmp35 = tmp14 - tmp17
tmp36 = tmp34 * tmp35
tmp37 = tmp31 + tmp36
tmp38 = -tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1[grid(1)](buf3,
arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
class SmoothCrossEntropyLossNew(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int',
smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes), device
=targets.device).fill_(smoothing / (n_classes - 1)).scatter_(
1, targets.data.unsqueeze(1), 1.0 - smoothing)
return targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Dieg0Alejandr0/3D-Generative-SBDD | SmoothCrossEntropyLoss | false | 1,216 | [
"MIT"
] | 0 | 51ffd36a6cf5048eeff6e68186a4608048feea4c | https://github.com/Dieg0Alejandr0/3D-Generative-SBDD/tree/51ffd36a6cf5048eeff6e68186a4608048feea4c | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
import torch.utils.tensorboard
class Model(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int',
smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes), device
=targets.device).fill_(smoothing / (n_classes - 1)).scatter_(
1, targets.data.unsqueeze(1), 1.0 - smoothing)
return targets
def forward(self, inputs, targets):
targets = SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.
size(-1), self.smoothing)
lsm = F.log_softmax(inputs, -1)
if self.weight is not None:
lsm = lsm * self.weight.unsqueeze(0)
loss = -(targets * lsm).sum(-1)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return []
|
StandardNLL | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xp/cxp3n4erp2bfm2zjjzavoa542wl2zmildh36kbphrkxvtlr74pw5.py
# Topologically Sorted Source Nodes: [log_P_2, sum_1, sum_2, sum_log_P, neg], Original ATen: [aten.mul, aten.sum, aten.div, aten.neg]
# Source node to ATen node mapping:
# log_P_2 => mul
# neg => neg
# sum_1 => sum_1
# sum_2 => sum_2
# sum_log_P => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {})
triton_poi_fused_div_mul_neg_sum_0 = async_compile.triton('triton_poi_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_neg_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_neg_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr2 + (16 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr2 + (32 + x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr2 + (48 + x0 + (64*x1)), xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tmp6.to(tl.float32)
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp8 + tmp10
tmp20 = tmp19 + tmp13
tmp21 = tmp20 + tmp16
tmp22 = tmp18 / tmp21
tmp23 = -tmp22
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_P_2, sum_1, sum_2, sum_log_P, neg], Original ATen: [aten.mul, aten.sum, aten.div, aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_neg_sum_0.run(arg2_1, arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.multiprocessing
import torch.utils.data
class StandardNLL(torch.nn.modules.loss._Loss):
"""
Shape:
log_prob: batch x time x class
y_true: batch x time
mask: batch x time
output: batch
"""
def forward(self, log_prob, y_true, mask):
mask = mask.float()
log_P = torch.gather(log_prob.view(-1, log_prob.size(2)), 1, y_true
.contiguous().view(-1, 1))
log_P = log_P.view(y_true.size(0), y_true.size(1))
log_P = log_P * mask
sum_log_P = torch.sum(log_P, dim=1) / torch.sum(mask, dim=1)
return -sum_log_P
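# Illustrative sketch (shapes invented for clarity): with log-probabilities of
# shape (batch=2, time=3, class=5) and a mask that zeroes padded timesteps,
# the result is one negative log-likelihood per sequence, averaged over the
# unmasked steps only:
#   lp = torch.log_softmax(torch.randn(2, 3, 5), dim=-1)
#   y = torch.randint(0, 5, (2, 3))
#   m = torch.tensor([[1, 1, 0], [1, 1, 1]])
#   loss = StandardNLL()(lp, y, m)   # shape: (2,)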
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.multiprocessing
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_neg_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp8 + tmp10
tmp20 = tmp19 + tmp13
tmp21 = tmp20 + tmp16
tmp22 = tmp18 / tmp21
tmp23 = -tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
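# Editorial note: this single elementwise kernel fuses the whole graph — the
# gather (with negative-index wrapping via tmp0 + 4 and a device-side bounds
# assert), the mask multiply, both length-4 time-dimension reductions (fully
# unrolled as the tmp8/tmp10/tmp13/tmp16 loads), the division, and the final
# negation.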
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_neg_sum_0[grid(64)](arg2_1, arg1_1, arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class StandardNLLNew(torch.nn.modules.loss._Loss):
"""
Shape:
log_prob: batch x time x class
y_true: batch x time
mask: batch x time
output: batch
"""
def forward(self, input_0, input_1, input_2):
arg1_1 = input_0
arg2_1 = input_1
arg0_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
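# Usage sketch (editorial; assumes CUDA). The compiled wrapper keeps the eager
# (log_prob, y_true, mask) argument order but is specialized to the traced
# example inputs from get_inputs above — note those supply an int64 "log_prob"
# of shape (4, 4, 4) and a 4-D float "mask":
#   m = StandardNLLNew()
#   out = m(torch.ones(4, 4, 4, dtype=torch.int64, device='cuda'),
#           torch.ones(4, 4, dtype=torch.int64, device='cuda'),
#           torch.rand(4, 4, 4, 4, device='cuda'))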
| WuDiDaBinGe/TAKG | StandardNLL | false | 1,217 | [
"MIT"
] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
import torch.multiprocessing
import torch.utils.data
class Model(torch.nn.modules.loss._Loss):
"""
Shape:
log_prob: batch x time x class
y_true: batch x time
mask: batch x time
output: batch
"""
def forward(self, log_prob, y_true, mask):
mask = mask.float()
log_P = torch.gather(log_prob.view(-1, log_prob.size(2)), 1, y_true
.contiguous().view(-1, 1))
log_P = log_P.view(y_true.size(0), y_true.size(1))
log_P = log_P * mask
sum_log_P = torch.sum(log_P, dim=1) / torch.sum(mask, dim=1)
return -sum_log_P
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MLP_model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/iw/ciwzqbjwnj7d5oa6p5yefl4n5gdocf3qor6rcesklcxblhq2hpwa.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %add_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_5, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_5,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/uz/cuzbnz5k52xhjpdoaucm3u5qsyj2prasqn3xsty665xnmdng4lmj.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/65/c65tc5xm2a3caq7ssexgjgocmcydeqgh76csqhvewry2b7btv4fj.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_5 => relu_2
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2q/c2qz2ws53yjsy6jju72g7tnxehfjxae2rfg45uiafec7haukf7jm.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_7 => relu_3
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2y/c2y7nraxg57efqjgaab4g2ywiolmgkkaxdzjcqfjeb2guhagama5.py
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_9 => relu_4
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kg/ckggh7vcwkx75oqfl3ddc3alsf6amoizuvccqqz7hr4wtjlek5uo.py
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_11 => relu_5
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4096, 4), (4, 1))
assert_size_stride(primals_3, (4096, ), (1, ))
assert_size_stride(primals_4, (2048, 4096), (4096, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (512, 2048), (2048, 1))
assert_size_stride(primals_7, (512, ), (1, ))
assert_size_stride(primals_8, (128, 512), (512, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (64, 128), (128, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (32, 64), (64, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (4, 32), (32, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4096), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 16384, grid=grid(16384), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4096, 2048), (1, 4096), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (2048, 512), (1, 2048), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 2048, grid=grid(2048), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (512, 128), (1, 512), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf7, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (128, 64), (1, 128), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (64, 32), (1, 64), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_13, 128, grid=grid(128), stream=stream0)
del primals_13
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(primals_14, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, buf1, buf3, buf5, buf7, buf9, buf11, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4096, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((512, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_model(nn.Module):
"""Feedfoward neural network with 6 hidden layer"""
def __init__(self, in_size, out_size):
super().__init__()
self.linear1 = nn.Linear(in_size, 4096)
self.linear2 = nn.Linear(4096, 2048)
self.linear3 = nn.Linear(2048, 512)
self.linear4 = nn.Linear(512, 128)
self.linear5 = nn.Linear(128, 64)
self.linear6 = nn.Linear(64, 32)
self.linear7 = nn.Linear(32, out_size)
def forward(self, xb):
xb = xb.view(xb.size(0), -1)
out = self.linear1(xb)
out = F.relu(out)
out = self.linear2(out)
out = F.relu(out)
out = self.linear3(out)
out = F.relu(out)
out = self.linear4(out)
out = F.relu(out)
out = self.linear5(out)
out = F.relu(out)
out = self.linear6(out)
out = F.relu(out)
out = self.linear7(out)
return out
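    # Note: the view above flattens each sample, so in_size must equal the
    # product of the non-batch dimensions of xb.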
def training_step(self, batch, criterion):
images, labels = batch
out = self(images)
loss = criterion(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out, labels)
acc = self.accuracy(out, labels)
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
        pass
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
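# Editorial note: the six kernels above are the same fused bias-add + ReLU,
# specialized per layer width. The first three carry a constant all-True mask
# (every launched lane is provably in bounds for the large power-of-two
# sizes), while the last three keep an explicit xmask — likely because their
# xnumel (512/256/128) can be smaller than an autotuned XBLOCK tile.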
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4096, 4), (4, 1))
assert_size_stride(primals_3, (4096,), (1,))
assert_size_stride(primals_4, (2048, 4096), (4096, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (512, 2048), (2048, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (128, 512), (512, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (64, 128), (128, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (32, 64), (64, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (4, 32), (32, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4096
), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16384)](buf1, primals_3, 16384, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4096, 2048),
(1, 4096), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8192)](buf3, primals_5, 8192, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (2048, 512),
(1, 2048), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(2048)](buf5, primals_7, 2048, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (512, 128), (
1, 512), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_3[grid(512)](buf7, primals_9, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (128, 64), (
1, 128), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(256)](buf9, primals_11, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (64, 32), (1,
64), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(128)](buf11, primals_13, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(
primals_14, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, buf1, buf3, buf5, buf7, buf9, buf11,
primals_14, primals_12, primals_10, primals_8, primals_6, primals_4)
class MLP_modelNew(nn.Module):
"""Feedfoward neural network with 6 hidden layer"""
def __init__(self, in_size, out_size):
super().__init__()
self.linear1 = nn.Linear(in_size, 4096)
self.linear2 = nn.Linear(4096, 2048)
self.linear3 = nn.Linear(2048, 512)
self.linear4 = nn.Linear(512, 128)
self.linear5 = nn.Linear(128, 64)
self.linear6 = nn.Linear(64, 32)
self.linear7 = nn.Linear(32, out_size)
def training_step(self, batch, criterion):
images, labels = batch
out = self(images)
loss = criterion(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out, labels)
acc = self.accuracy(out, labels)
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
        pass
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_10 = self.linear5.weight
primals_11 = self.linear5.bias
primals_12 = self.linear6.weight
primals_13 = self.linear6.bias
primals_14 = self.linear7.weight
primals_15 = self.linear7.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
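# Minimal usage sketch (editorial; assumes CUDA). The compiled `call` is
# specialized to the traced batch of 4 with in_size=4 and out_size=4, per the
# assert_size_stride guards above:
#   model = MLP_modelNew(in_size=4, out_size=4).cuda()
#   logits = model(torch.rand(4, 4, device='cuda'))   # -> shape (4, 4)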
| WeihengXia0123/Machine_Learning_Service | MLP_model | false | 1,218 | [
"MIT"
] | 0 | 516d64ff780317ee96e18584001b77165ce6531c | https://github.com/WeihengXia0123/Machine_Learning_Service/tree/516d64ff780317ee96e18584001b77165ce6531c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Feedfoward neural network with 6 hidden layer"""
def __init__(self, in_size, out_size):
super().__init__()
self.linear1 = nn.Linear(in_size, 4096)
self.linear2 = nn.Linear(4096, 2048)
self.linear3 = nn.Linear(2048, 512)
self.linear4 = nn.Linear(512, 128)
self.linear5 = nn.Linear(128, 64)
self.linear6 = nn.Linear(64, 32)
self.linear7 = nn.Linear(32, out_size)
def forward(self, xb):
xb = xb.view(xb.size(0), -1)
out = self.linear1(xb)
out = F.relu(out)
out = self.linear2(out)
out = F.relu(out)
out = self.linear3(out)
out = F.relu(out)
out = self.linear4(out)
out = F.relu(out)
out = self.linear5(out)
out = F.relu(out)
out = self.linear6(out)
out = F.relu(out)
out = self.linear7(out)
return out
def training_step(self, batch, criterion):
images, labels = batch
out = self(images)
loss = criterion(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out, labels)
acc = self.accuracy(out, labels)
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
        pass
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
MaskedSoftmax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3o/c3oi5zh3uy76k3c2wvknoi2wkyr3vgwuzzpbn2244rlne2yo26ji.py
# Topologically Sorted Source Nodes: [max_1, sub], Original ATen: [aten.max, aten.sub]
# Source node to ATen node mapping:
# max_1 => max_1
# sub => sub
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 4, True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem), kwargs = {})
triton_poi_fused_max_sub_0 = async_compile.triton('triton_poi_fused_max_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ee/ceew63slkavsjpseewpffuujifc2gmu2m6n5u4732xnkvycwlswh.py
# Topologically Sorted Source Nodes: [dist], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dist => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [4], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jo/cjooyf2taupk6b3rhpvd4u5im6tyfn25cyirn5yix7vtprzujjxg.py
# Topologically Sorted Source Nodes: [dist], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dist => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, sub], Original ATen: [aten.max, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_max_sub_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dist], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [dist], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf1, buf2, 1024, grid=grid(1024), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.multiprocessing
from torch import nn
import torch.utils.data
class MaskedSoftmax(nn.Module):
def __init__(self, dim):
super(MaskedSoftmax, self).__init__()
self.dim = dim
def forward(self, logit, mask=None):
if mask is None:
max_value = torch.max(logit, dim=self.dim, keepdim=True)[0]
dist = F.softmax(logit - max_value, dim=self.dim)
else:
dist_ = F.softmax(logit - torch.max(logit, dim=self.dim,
keepdim=True)[0], dim=self.dim) * mask
normalization_factor = dist_.sum(self.dim, keepdim=True)
dist = dist_ / normalization_factor
return dist
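# Illustrative sketch: with a mask along the softmax dimension, masked entries
# are zeroed and the surviving probabilities are renormalized to sum to 1
# (dim=-1 chosen here for clarity):
#   m = MaskedSoftmax(dim=-1)
#   p = m(torch.tensor([[1.0, 2.0, 3.0]]), torch.tensor([[1.0, 1.0, 0.0]]))
#   p.sum(-1)   # -> tensor([1.])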
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.multiprocessing
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_max_sub_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(1024)](buf1, buf2, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2,
class MaskedSoftmaxNew(nn.Module):
def __init__(self, dim):
super(MaskedSoftmaxNew, self).__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
WuDiDaBinGe/TAKG | MaskedSoftmax | false | 1219 | [
"MIT"
] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
from torch.nn import functional as F
import torch.multiprocessing
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, logit, mask=None):
if mask is None:
max_value = torch.max(logit, dim=self.dim, keepdim=True)[0]
dist = F.softmax(logit - max_value, dim=self.dim)
else:
dist_ = F.softmax(logit - torch.max(logit, dim=self.dim,
keepdim=True)[0], dim=self.dim) * mask
normalization_factor = dist_.sum(self.dim, keepdim=True)
dist = dist_ / normalization_factor
return dist
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ShiftedSoftplus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ff/cfflcrfbkqug6wu6spd6cdudrcnssg32k3762qutew77yjljfd4b.py
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
# Source node to ATen node mapping:
# softplus => exp, gt, log1p, where
# sub => sub
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, 0.6931471824645996), kwargs = {})
triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
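    # softplus(x) = log(1 + exp(x)); above the threshold of 20, exp(x) would
    # overflow float32, so the kernel returns x itself (log1p(exp(x)) ~= x there).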
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471824645996
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.tensorboard
class ShiftedSoftplus(nn.Module):
def __init__(self):
super().__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, x):
return F.softplus(x) - self.shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
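# Quick sanity check (illustrative, not from the source repo): shifting by
# log(2) makes the activation pass through the origin, since softplus(0) =
# log(2).
#
#   ssp = ShiftedSoftplus()
#   assert torch.allclose(ssp(torch.zeros(3)), torch.zeros(3), atol=1e-6)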
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471824645996
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ShiftedSoftplusNew(nn.Module):
def __init__(self):
super().__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
] | Dieg0Alejandr0/3D-Generative-SBDD | ShiftedSoftplus | false | 1220 | [
"MIT"
] | 0 | 51ffd36a6cf5048eeff6e68186a4608048feea4c | https://github.com/Dieg0Alejandr0/3D-Generative-SBDD/tree/51ffd36a6cf5048eeff6e68186a4608048feea4c | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.tensorboard
class Model(nn.Module):
def __init__(self):
super().__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, x):
return F.softplus(x) - self.shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InnerProductLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dx/cdxuxa4usulotytq2uhe2lpzvskzdaihvkxtup7hxtpolwz634sr.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# p => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select, %select_1, %select_2, %select_3, %select_4, %select_5], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 24
x0 = xindex % 4
x2 = (xindex // 96)
x3 = xindex
tmp0 = x1
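    # x1 indexes the 24 concatenated rows; each block of 4 rows comes from a
    # different source slice, and the range tests plus the tl.where chain
    # below route each row to the right load.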
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (64 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (64 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr0 + (128 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + (x3), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3d/c3d5ce25tfocluzh3yh76j6qjeohfmdemkusyhh2wn2xdyzoda7p.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# q => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_6, %select_7, %select_8, %select_9, %select_10, %select_11], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 24
x0 = xindex % 4
x2 = (xindex // 96)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 + x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (128 + x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (192 + x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (128 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (192 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr0 + (192 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + (x3), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wm/cwmiv6irrhvdznioij35ss6z35p7cnponuwhiju4tyoert736dwz.py
# Topologically Sorted Source Nodes: [inner_product, inner_product_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# inner_product => mul
# inner_product_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %cat_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2], True), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
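    # Dot product over the embedding dim (size 4), unrolled into four
    # multiply-accumulate steps per output element.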
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0)
buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(arg0_1, buf1, 384, grid=grid(384), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [inner_product, inner_product_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_2.run(buf0, buf1, buf2, 96, grid=grid(96), stream=stream0)
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from sklearn.metrics import *
class InnerProductLayer(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
- **reduce_sum**: bool. Whether return inner product or element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super(InnerProductLayer, self).__init__()
self.reduce_sum = reduce_sum
def forward(self, inputs):
embed_list = inputs
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
for j in range(i + 1, num_inputs):
row.append(i)
col.append(j)
p = torch.cat([embed_list[idx] for idx in row], dim=1)
q = torch.cat([embed_list[idx] for idx in col], dim=1)
inner_product = p * q
if self.reduce_sum:
inner_product = torch.sum(inner_product, dim=2, keepdim=True)
return inner_product
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
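# Worked example (illustrative, not from the source repo): for a list of
# N = 4 embeddings of shape (batch_size, 1, d), the row/col loops enumerate
# the N*(N-1)/2 = 6 pairs (0,1) (0,2) (0,3) (1,2) (1,3) (2,3), and with
# reduce_sum=True the output collapses the embedding dim:
#
#   layer = InnerProductLayer()
#   embeds = [torch.rand(2, 1, 8) for _ in range(4)]
#   assert layer(embeds).shape == (2, 6, 1)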
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24
x0 = xindex % 4
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask,
other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tl.full([1], 24, tl.int64)
tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + x3, tmp34, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24
x0 = xindex % 4
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 &
xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tl.full([1], 24, tl.int64)
tmp29 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + x3, tmp34, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(384)](arg0_1, buf1, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(96)](buf0, buf1, buf2, 96, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
del buf1
return buf2,
class InnerProductLayerNew(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
- **reduce_sum**: bool. Whether return inner product or element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super(InnerProductLayerNew, self).__init__()
self.reduce_sum = reduce_sum
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
] | Sunmyunghan/Final_Project | InnerProductLayer | false | 1221 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
from sklearn.metrics import *
class Model(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
- **reduce_sum**: bool. Whether return inner product or element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super().__init__()
self.reduce_sum = reduce_sum
def forward(self, inputs):
embed_list = inputs
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
for j in range(i + 1, num_inputs):
row.append(i)
col.append(j)
p = torch.cat([embed_list[idx] for idx in row], dim=1)
q = torch.cat([embed_list[idx] for idx in col], dim=1)
inner_product = p * q
if self.reduce_sum:
inner_product = torch.sum(inner_product, dim=2, keepdim=True)
return inner_product
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TransferNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6z/c6zp6ex7l6je4eaeyrwdsgxas67mkc5y6ehm56jynqsnopblmcu3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65712
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 74
x1 = (xindex // 74) % 74
x2 = (xindex // 5476)
x3 = xindex
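    # Reflection padding of a 64x64 map to 74x74 (pad=5 on each side):
    # abs(-5 + x0) folds the left/top pad back into the image and
    # abs(-63 + ...) mirrors the right/bottom overflow; the offsets are
    # measured backwards from the last element (4095) of each channel.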
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-5) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-5) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cc/cccekbpoouz4zzgzos2c56sobuvklb5pmasacbllwb3jzohxpuy7.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => add, rsqrt, var_mean
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4356
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
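    # Streaming Welford reduction: each RBLOCK chunk updates the running
    # (mean, M2, weight) triple so the per-channel mean and variance for the
    # instance norm are computed in a single pass over the 4356 elements.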
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4356*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (4356*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tmp7 = 4356.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/in/ciny2bql3sygecchlvr6rxw73jnhl7dgi3s5w2g2fefaoug53zzz.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_2 => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
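    # Tiles the 32 per-channel affine params across the batch of 4: instance
    # norm treats every (n, c) pair as its own normalization group.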
tmp0 = tl.load(in_ptr0 + (x0 % 32), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/n2/cn255v2yj5fbdnwsnxypmsjna6khzwy4jw6j2ry4orupe5aqy6iw.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_3 => relu
# x_4 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_6, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_6]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 574592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 67
x1 = (xindex // 67) % 67
x2 = (xindex // 4489)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4355 + ((-1)*(tl_math.abs((-65) + (tl_math.abs((-1) + x0))))) + ((-66)*(tl_math.abs((-65) + (tl_math.abs((-1) + x1))))) + (4356*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
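    # Fused epilogue: normalize ((x - mean) * rsqrt), apply the affine
    # weight/bias, then ReLU, all while gathering through reflection-pad
    # indices for the next convolution.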
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xz/cxzh6nqsjelve5ob25xfne5gachxdayce26fuvsylva6jrnjcgu5.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_5 => convolution_1
# x_6 => add_2, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_4 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 1089
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 64
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (1089*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (1089*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tmp7 = 1089.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bo/cbop6byfkkzzjktajzua3ovnpvhy32nxb7dbv364jfeaxunlv7bo.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_6 => repeat_2
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
triton_poi_fused_repeat_5 = async_compile.triton('triton_poi_fused_repeat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pu/cpuzqhw3biucgjwhzz5zpno5fys7fmjfedq7qg5cdqio5ht2p77j.py
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_7 => relu_1
# x_8 => _unsafe_index_4, _unsafe_index_5
# Graph fragment:
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_11, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_11]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x2 = (xindex // 1156)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (1088 + ((-1)*(tl_math.abs((-32) + (tl_math.abs((-1) + x0))))) + ((-33)*(tl_math.abs((-32) + (tl_math.abs((-1) + x1))))) + (1089*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/nr/cnrulaca7axjpejfelvoukj4275yefkepol6ssk73jegol2a7v4j.py
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_10 => add_4, rsqrt_2, var_mean_2
# x_9 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_10, %primals_11, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_7 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_7(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
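# Editor's sketch (hypothetical): the eager-mode computation this persistent
# reduction fuses, assuming a PyTorch recent enough for the `correction=`
# keyword on torch.var_mean. The kernel folds the conv bias add into the
# in-place store, then emits the per-(sample, channel) mean and
# rsqrt(var + eps) that the graph fragment labels var_mean_2 / rsqrt_2.
def _sketch_conv_instance_stats(x, weight, bias, eps=1e-05):
    import torch
    import torch.nn.functional as F
    y = F.conv2d(x, weight, bias, stride=2)
    var, mean = torch.var_mean(y, dim=(2, 3), correction=0, keepdim=True)
    return y, mean, torch.rsqrt(var + eps)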
# kernel path: runs/run_shard_6/inductor_cache/cx/ccx6gci2ib4oohea5xn4ov5ur2nz2zods6mnyw3romkzf5s25lx3.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_10 => repeat_4
# Graph fragment:
# %repeat_4 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_12, [4]), kwargs = {})
triton_poi_fused_repeat_8 = async_compile.triton('triton_poi_fused_repeat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 128), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
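# Editor's sketch (hypothetical): aten.repeat here just tiles the 128-entry
# affine parameter across the batch of 4, so every (n, c) instance-norm slot
# indexes its own copy; out[i] == param[i % param.numel()], exactly the
# `x0 % 128` load above.
def _sketch_repeat(param, batch=4):
    return param.repeat(batch)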
# kernel path: runs/run_shard_6/inductor_cache/cb/ccbukoi7vclz6u6p3devgjverddcql7canz2j67ieotvy3arkary.py
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_11 => relu_2
# x_12 => _unsafe_index_6, _unsafe_index_7
# Graph fragment:
# %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %sub_16, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_16]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x2 = (xindex // 324)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hs/chsmvdjd5wcez3hx37l5f5urhkvxeg2zxbjltpjflrpl22zdydh7.py
# Topologically Sorted Source Nodes: [x_13, x_14, x_15], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# x_13 => convolution_3
# x_14 => add_6, repeat_6, repeat_7, rsqrt_3, var_mean_3
# x_15 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_6 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_16, [4]), kwargs = {})
# %repeat_7 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_17, [4]), kwargs = {})
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x0 % 128), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 % 128), None, eviction_policy='evict_last')
tmp2 = tl.load(in_out_ptr0 + (r3 + (256*x0)), None)
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = tl.broadcast_to(tmp5, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.full([1], 256, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp5 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp4 - tmp12
tmp24 = tmp23 * tmp22
tmp25 = tmp24 * tmp0
tmp26 = tmp25 + tmp1
tmp27 = tl.full([1], 0, tl.int32)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tl.store(out_ptr0 + (x0), tmp0, None)
tl.store(out_ptr1 + (x0), tmp1, None)
tl.store(in_out_ptr0 + (r3 + (256*x0)), tmp4, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp22, None)
tl.store(out_ptr3 + (r3 + (256*x0)), tmp28, None)
tl.store(out_ptr2 + (x0), tmp12, None)
''', device_str='cuda')
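# Editor's sketch (hypothetical): the whole conv -> instance norm -> ReLU
# block this kernel fuses, written against per-channel gamma/beta of shape
# (C,). Functionally this matches torch.nn.functional.instance_norm followed
# by a ReLU, with the mean/rsqrt also returned as separate outputs above.
def _sketch_conv_instancenorm_relu(x, w, b, gamma, beta, eps=1e-05):
    import torch
    import torch.nn.functional as F
    y = F.conv2d(x, w, b)
    var, mean = torch.var_mean(y, dim=(2, 3), correction=0, keepdim=True)
    y = (y - mean) * torch.rsqrt(var + eps)
    return torch.relu(y * gamma[None, :, None, None] + beta[None, :, None, None])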
# kernel path: runs/run_shard_6/inductor_cache/tt/cttvoi63nbrkzaawfkfj7kof5gg7rdu7erfcx3zznexrlxhhqpbx.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_16 => _unsafe_index_8, _unsafe_index_9
# Graph fragment:
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_3, [None, None, %sub_16, None]), kwargs = {})
# %_unsafe_index_9 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_8, [None, None, None, %sub_16]), kwargs = {})
triton_poi_fused_reflection_pad2d_11 = async_compile.triton('triton_poi_fused_reflection_pad2d_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x2 = (xindex // 324)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/oo/coonli7toumwjdnagyhu5xmu3wzpfj6wzlk3negwockmloxtypls.py
# Topologically Sorted Source Nodes: [x_19, x_20, x_21], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_19 => relu_4
# x_20 => add_10
# x_21 => _unsafe_index_10, _unsafe_index_11
# Graph fragment:
# %relu_4 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %relu_4), kwargs = {})
# %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_10, [None, None, %sub_16, None]), kwargs = {})
# %_unsafe_index_11 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_10, [None, None, None, %sub_16]), kwargs = {})
triton_poi_fused_add_reflection_pad2d_relu_12 = async_compile.triton('triton_poi_fused_add_reflection_pad2d_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_reflection_pad2d_relu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_reflection_pad2d_relu_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x2 = (xindex // 324)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp0 + tmp11
tl.store(out_ptr0 + (x3), tmp12, None)
''', device_str='cuda')
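# Editor's sketch (hypothetical): the residual-block tail fused above. `skip`
# is relu_3 from the first half of the block; `y` is the second conv output,
# with its instance-norm terms pre-broadcast to (N, C, 1, 1).
def _sketch_residual_pad(skip, y, mean, rsqrt, gamma, beta):
    import torch
    import torch.nn.functional as F
    out = skip + torch.relu((y - mean) * rsqrt * gamma + beta)
    return F.pad(out, (1, 1, 1, 1), mode='reflect')  # 16x16 -> 18x18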
# kernel path: runs/run_shard_6/inductor_cache/rb/crbic5nlnsjyr3okgxosok7y7jbjadlfwpgd2t62anmhmxmicdwt.py
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_58 => iota_28
# Graph fragment:
# %iota_28 : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_13 = async_compile.triton('triton_poi_fused_arange_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lh/clhmroknisxpfulbzxd35lthz4fgwlqlou2jn6t3wegzuda2pndf.py
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_58 => add_31, add_32, convert_element_type, convert_element_type_1, mul_26, mul_27
# Graph fragment:
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_28, 1), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_31, torch.float32), kwargs = {})
# %add_32 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_32, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_27, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
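# Editor's sketch (hypothetical): the index vector this kernel materializes
# is the source map for a 2x nearest-neighbor upsample; floor(i * 0.5) sends
# each pair of output positions back to one input position, matching the
# arange -> float -> * 0.5 -> int64 chain in the graph fragment.
def _sketch_nearest_src_indices(out_size=32):
    import torch
    return (torch.arange(out_size, dtype=torch.float32) * 0.5).to(torch.int64)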
# kernel path: runs/run_shard_6/inductor_cache/xz/cxzxvmd5v3ctwfqe6annx2nolajigrq4yekqbvwku5szxmckjb5v.py
# Topologically Sorted Source Nodes: [x_55, x_56, x_57, x_58, x_59], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d, aten._unsafe_index]
# Source node to ATen node mapping:
# x_55 => relu_12
# x_56 => add_30
# x_57 => _unsafe_index_26, _unsafe_index_27
# x_58 => _unsafe_index_28
# x_59 => _unsafe_index_29
# Graph fragment:
# %relu_12 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_25,), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_11, %relu_12), kwargs = {})
# %_unsafe_index_26 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_30, [None, None, %sub_66, None]), kwargs = {})
# %_unsafe_index_27 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_26, [None, None, None, %sub_66]), kwargs = {})
# %_unsafe_index_28 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_27, [None, None, %unsqueeze_52, %convert_element_type_1]), kwargs = {})
# %_unsafe_index_29 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_28, [None, None, %sub_70, None]), kwargs = {})
triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 557056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 34
x0 = xindex % 32
x2 = (xindex // 1088)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr6 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (255 + ((-1)*(tl_math.abs((-15) + tmp8))) + ((-16)*(tl_math.abs((-15) + tmp4))) + (256*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (255 + ((-1)*(tl_math.abs((-15) + tmp8))) + ((-16)*(tl_math.abs((-15) + tmp4))) + (256*x2)), None, eviction_policy='evict_last')
tmp12 = tmp10 - tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp21 = tmp9 + tmp20
tl.store(out_ptr0 + (x4), tmp21, None)
''', device_str='cuda')
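# Editor's sketch (hypothetical): an eager approximation of the five ops
# fused above. The intermediate reflection pads are folded into the
# load-index arithmetic, so the net effect appears to be: residual add,
# 2x nearest upsample, then a one-row reflection pad (the matching column
# pad happens in the next kernel).
def _sketch_residual_upsample(skip, y, mean, rsqrt, gamma, beta):
    import torch
    import torch.nn.functional as F
    out = skip + torch.relu((y - mean) * rsqrt * gamma + beta)  # 16x16
    out = F.interpolate(out, scale_factor=2, mode='nearest')    # -> 32x32
    return F.pad(out, (0, 0, 1, 1), mode='reflect')             # -> 34x32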
# kernel path: runs/run_shard_6/inductor_cache/os/cosc266h7wvveiabyxuy4nbeqm5psm2jxkrmtwz6pznjwe3cke65.py
# Topologically Sorted Source Nodes: [x_59], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_59 => _unsafe_index_30
# Graph fragment:
# %_unsafe_index_30 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_29, [None, None, None, %sub_70]), kwargs = {})
triton_poi_fused_reflection_pad2d_16 = async_compile.triton('triton_poi_fused_reflection_pad2d_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_16(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 34
x1 = (xindex // 34)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + (32*x1)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/oe/coevdbvv37usd27ae26ara6xiwj7sa7dv2r7eewjinl3gdmwonuu.py
# Topologically Sorted Source Nodes: [x_60, x_61], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_60 => convolution_13
# x_61 => add_35, rsqrt_13, var_mean_13
# Graph fragment:
# %convolution_13 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_30, %primals_54, %primals_55, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_13 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_26, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_35 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_26, 1e-05), kwargs = {})
# %rsqrt_13 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_35,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_17 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_17', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_17(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ou/couo6xzmjwqbsh4aninhzxrfm7t6zlt3vyqlw6f6d3t6xh3frbfg.py
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_64 => iota_34
# Graph fragment:
# %iota_34 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_18 = async_compile.triton('triton_poi_fused_arange_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ud/cudpb3kdyvqb7qiovaslx7kchi6orczekfulhrfx5wv6frsnspck.py
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_64 => add_37, add_38, convert_element_type_4, convert_element_type_5, mul_32, mul_33
# Graph fragment:
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_34, 1), kwargs = {})
# %add_37 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_32, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_37, torch.float32), kwargs = {})
# %add_38 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_38, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_33, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_19 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vo/cvor2isivy3yg7msgpkv5xnnhsipwsewsjhiw6m7eptwgflsriik.py
# Topologically Sorted Source Nodes: [x_62, x_63, x_64, x_65], Original ATen: [aten.relu, aten.reflection_pad2d, aten._unsafe_index]
# Source node to ATen node mapping:
# x_62 => relu_13
# x_63 => _unsafe_index_31, _unsafe_index_32
# x_64 => _unsafe_index_33
# x_65 => _unsafe_index_34, _unsafe_index_35
# Graph fragment:
# %relu_13 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_27,), kwargs = {})
# %_unsafe_index_31 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_13, [None, None, %sub_75, None]), kwargs = {})
# %_unsafe_index_32 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_31, [None, None, None, %sub_75]), kwargs = {})
# %_unsafe_index_33 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_32, [None, None, %unsqueeze_57, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_34 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_33, [None, None, %sub_79, None]), kwargs = {})
# %_unsafe_index_35 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_34, [None, None, None, %sub_79]), kwargs = {})
triton_poi_fused__unsafe_index_reflection_pad2d_relu_20 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_relu_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 66) % 66
x0 = xindex % 66
x2 = (xindex // 4356)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (1023 + ((-1)*(tl_math.abs((-31) + tmp8))) + ((-32)*(tl_math.abs((-31) + tmp4))) + (1024*x2)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tl.store(out_ptr0 + (x5), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/uv/cuvh6ka5moywn3lyxkqjfsct23hu3t4t2l7akqp5s66jv334mgec.py
# Topologically Sorted Source Nodes: [x_66, x_67], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_66 => convolution_14
# x_67 => add_41, rsqrt_14, var_mean_14
# Graph fragment:
# %convolution_14 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_35, %primals_58, %primals_59, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_14 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_41 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_28, 1e-05), kwargs = {})
# %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_41,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_21 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_21', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_21(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tmp7 = 4096.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
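# Editor's sketch (hypothetical): triton_helpers.welford_reduce implements a
# blockwise version of Welford's one-pass mean/variance update; the scalar
# recurrence it parallelizes looks like this (assumes a non-empty sequence).
def _sketch_welford(xs):
    mean, m2, count = 0.0, 0.0, 0
    for v in xs:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    return mean, m2 / count  # population variance (correction=0)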
# kernel path: runs/run_shard_6/inductor_cache/uu/cuuhbrvjuevz367ve2ajclcc6uy5k2vtz5hjdsrcf7qo736pfhc3.py
# Topologically Sorted Source Nodes: [x_68, x_69], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_68 => relu_14
# x_69 => _unsafe_index_36, _unsafe_index_37
# Graph fragment:
# %relu_14 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_29,), kwargs = {})
# %_unsafe_index_36 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_14, [None, None, %sub_84, None]), kwargs = {})
# %_unsafe_index_37 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_36, [None, None, None, %sub_84]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_22 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 663552
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 72
x1 = (xindex // 72) % 72
x2 = (xindex // 5184)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-4) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-4) + x1))))) + (4096*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/k6/ck6kvb4rgbhsot5d63arefzdafqmmalawdaegylknrecpeoxc6hh.py
# Topologically Sorted Source Nodes: [x_70, x_71, x_72], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.sigmoid]
# Source node to ATen node mapping:
# x_70 => convolution_15
# x_71 => add_43, repeat_30, rsqrt_15, var_mean_15
# x_72 => sigmoid
# Graph fragment:
# %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_37, %primals_62, %primals_63, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_30 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_64, [4]), kwargs = {})
# %var_mean_15 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_43 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_30, 1e-05), kwargs = {})
# %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_43,), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_31,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 12
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
x1 = xindex % 3
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = triton_helpers.welford_reduce(
tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + (4096*x0)), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(
tmp5_mean, tmp5_m2, tmp5_weight, 1
)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp5, xmask)
tmp17 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp8 = tl.load(in_out_ptr0 + (r3 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 4096.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp19 = tl.sigmoid(tmp18)
tl.store(out_ptr3 + (r3 + (4096*x0)), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp6 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr4 + (x0), tmp24, xmask)
''', device_str='cuda')
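# Reference sketch (assumption, not part of the generated code): the fused kernel
# above is numerically equivalent to this eager-mode computation for the final
# layer, where `bias` is the convolution bias and `gamma`/`beta` are the affine
# InstanceNorm parameters that the kernel repeats across the batch dimension.
def _reference_norm_sigmoid(conv_out, bias, gamma, beta, eps=1e-05):
    # conv_out: (N, C, H, W); bias, gamma, beta: (C,)
    x = conv_out + bias.view(1, -1, 1, 1)
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    x_hat = (x - mean) * torch.rsqrt(var + eps)
    return torch.sigmoid(x_hat * gamma.view(1, -1, 1, 1) + beta.view(1, -1, 1, 1))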
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, ), (1, ))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, ), (1, ))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, ), (1, ))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, ), (1, ))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (128, ), (1, ))
assert_size_stride(primals_21, (128, ), (1, ))
assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128, ), (1, ))
assert_size_stride(primals_24, (128, ), (1, ))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, ), (1, ))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (128, ), (1, ))
assert_size_stride(primals_33, (128, ), (1, ))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, ), (1, ))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (128, ), (1, ))
assert_size_stride(primals_41, (128, ), (1, ))
assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (128, ), (1, ))
assert_size_stride(primals_44, (128, ), (1, ))
assert_size_stride(primals_45, (128, ), (1, ))
assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, ), (1, ))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (128, ), (1, ))
assert_size_stride(primals_53, (128, ), (1, ))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64, ), (1, ))
assert_size_stride(primals_56, (64, ), (1, ))
assert_size_stride(primals_57, (64, ), (1, ))
assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (32, ), (1, ))
assert_size_stride(primals_60, (32, ), (1, ))
assert_size_stride(primals_61, (32, ), (1, ))
assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1))
assert_size_stride(primals_63, (3, ), (1, ))
assert_size_stride(primals_64, (3, ), (1, ))
assert_size_stride(primals_65, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 74, 74), (16428, 5476, 74, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 65712, grid=grid(65712), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 66, 66), (139392, 4356, 66, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 128, 4356, grid=grid(128), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 128, grid=grid(128), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 128, grid=grid(128), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 32, 67, 67), (143648, 4489, 67, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 574592, grid=grid(574592), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf11 = buf10; del buf10 # reuse
buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32)
buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_4.run(buf11, buf17, primals_7, buf14, 256, 1089, grid=grid(256), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_8, buf12, 256, grid=grid(256), stream=stream0)
del primals_8
buf13 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_9, buf13, 256, grid=grid(256), stream=stream0)
del primals_9
buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_6.run(buf11, buf14, buf17, buf12, buf13, buf18, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = buf19; del buf19 # reuse
buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf20, buf26, primals_11, buf23, 512, 256, grid=grid(512), stream=stream0)
del primals_11
buf21 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_12, buf21, 512, grid=grid(512), stream=stream0)
del primals_12
buf22 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_13, buf22, 512, grid=grid(512), stream=stream0)
del primals_13
buf27 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_9.run(buf20, buf23, buf26, buf21, buf22, buf27, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 16, 16), (32768, 256, 16, 1))
buf30 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf31 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf29 = buf28; del buf28 # reuse
buf32 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf35 = reinterpret_tensor(buf33, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf33 # reuse
buf36 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13, x_14, x_15], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10.run(buf29, buf35, primals_16, primals_17, primals_15, buf30, buf31, buf32, buf36, 512, 256, grid=grid(512), stream=stream0)
del primals_15
del primals_16
del primals_17
buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_11.run(buf36, buf37, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1))
buf39 = buf38; del buf38 # reuse
buf42 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf43 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf45 = reinterpret_tensor(buf43, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf43 # reuse
# Topologically Sorted Source Nodes: [x_17, x_18], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf39, buf45, primals_19, buf42, 512, 256, grid=grid(512), stream=stream0)
del primals_19
buf40 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_20, buf40, 512, grid=grid(512), stream=stream0)
del primals_20
buf41 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_21, buf41, 512, grid=grid(512), stream=stream0)
del primals_21
buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19, x_20, x_21], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d]
triton_poi_fused_add_reflection_pad2d_relu_12.run(buf36, buf39, buf42, buf45, buf40, buf41, buf46, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1))
buf49 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf50 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf48 = buf47; del buf47 # reuse
buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf52 # reuse
buf55 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [x_22, x_23, x_24], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10.run(buf48, buf54, primals_24, primals_25, primals_23, buf49, buf50, buf51, buf55, 512, 256, grid=grid(512), stream=stream0)
del primals_23
del primals_24
del primals_25
buf56 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_11.run(buf55, buf56, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 16, 16), (32768, 256, 16, 1))
buf58 = buf57; del buf57 # reuse
buf61 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf64 = reinterpret_tensor(buf62, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf62 # reuse
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf58, buf64, primals_27, buf61, 512, 256, grid=grid(512), stream=stream0)
del primals_27
buf59 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_28, buf59, 512, grid=grid(512), stream=stream0)
del primals_28
buf60 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_29, buf60, 512, grid=grid(512), stream=stream0)
del primals_29
buf65 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_28, x_29, x_30], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d]
triton_poi_fused_add_reflection_pad2d_relu_12.run(buf55, buf58, buf61, buf64, buf59, buf60, buf65, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 16, 16), (32768, 256, 16, 1))
buf68 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf69 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf67 = buf66; del buf66 # reuse
buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf71 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf73 = reinterpret_tensor(buf71, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf71 # reuse
buf74 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [x_31, x_32, x_33], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10.run(buf67, buf73, primals_32, primals_33, primals_31, buf68, buf69, buf70, buf74, 512, 256, grid=grid(512), stream=stream0)
del primals_31
del primals_32
del primals_33
buf75 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_11.run(buf74, buf75, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.convolution]
buf76 = extern_kernels.convolution(buf75, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 16, 16), (32768, 256, 16, 1))
buf77 = buf76; del buf76 # reuse
buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf81 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf83 = reinterpret_tensor(buf81, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [x_35, x_36], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf77, buf83, primals_35, buf80, 512, 256, grid=grid(512), stream=stream0)
del primals_35
buf78 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_36, buf78, 512, grid=grid(512), stream=stream0)
del primals_36
buf79 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_37, buf79, 512, grid=grid(512), stream=stream0)
del primals_37
buf84 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_37, x_38, x_39], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d]
triton_poi_fused_add_reflection_pad2d_relu_12.run(buf74, buf77, buf80, buf83, buf78, buf79, buf84, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
buf85 = extern_kernels.convolution(buf84, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf88 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf86 = buf85; del buf85 # reuse
buf89 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf90 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf92 = reinterpret_tensor(buf90, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf90 # reuse
buf93 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [x_40, x_41, x_42], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10.run(buf86, buf92, primals_40, primals_41, primals_39, buf87, buf88, buf89, buf93, 512, 256, grid=grid(512), stream=stream0)
del primals_39
del primals_40
del primals_41
buf94 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_11.run(buf93, buf94, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution]
buf95 = extern_kernels.convolution(buf94, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 16, 16), (32768, 256, 16, 1))
buf96 = buf95; del buf95 # reuse
buf99 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf100 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf102 = reinterpret_tensor(buf100, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf100 # reuse
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf96, buf102, primals_43, buf99, 512, 256, grid=grid(512), stream=stream0)
del primals_43
buf97 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_45], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_44, buf97, 512, grid=grid(512), stream=stream0)
del primals_44
buf98 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_45], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_45, buf98, 512, grid=grid(512), stream=stream0)
del primals_45
buf103 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_46, x_47, x_48], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d]
triton_poi_fused_add_reflection_pad2d_relu_12.run(buf93, buf96, buf99, buf102, buf97, buf98, buf103, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_49], Original ATen: [aten.convolution]
buf104 = extern_kernels.convolution(buf103, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf104, (4, 128, 16, 16), (32768, 256, 16, 1))
buf106 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf107 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf105 = buf104; del buf104 # reuse
buf108 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf109 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf111 = reinterpret_tensor(buf109, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf109 # reuse
buf112 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [x_49, x_50, x_51], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10.run(buf105, buf111, primals_48, primals_49, primals_47, buf106, buf107, buf108, buf112, 512, 256, grid=grid(512), stream=stream0)
del primals_47
del primals_48
del primals_49
buf113 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_52], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_11.run(buf112, buf113, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_53], Original ATen: [aten.convolution]
buf114 = extern_kernels.convolution(buf113, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf114, (4, 128, 16, 16), (32768, 256, 16, 1))
buf115 = buf114; del buf114 # reuse
buf118 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf119 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf121 = reinterpret_tensor(buf119, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf119 # reuse
# Topologically Sorted Source Nodes: [x_53, x_54], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_7.run(buf115, buf121, primals_51, buf118, 512, 256, grid=grid(512), stream=stream0)
del primals_51
buf116 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_54], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_52, buf116, 512, grid=grid(512), stream=stream0)
del primals_52
buf117 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_54], Original ATen: [aten.repeat]
triton_poi_fused_repeat_8.run(primals_53, buf117, 512, grid=grid(512), stream=stream0)
del primals_53
buf122 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.arange]
triton_poi_fused_arange_13.run(buf122, 32, grid=grid(32), stream=stream0)
buf123 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_14.run(buf123, 32, grid=grid(32), stream=stream0)
buf124 = empty_strided_cuda((4, 128, 34, 32), (139264, 1088, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_55, x_56, x_57, x_58, x_59], Original ATen: [aten.relu, aten.add, aten.reflection_pad2d, aten._unsafe_index]
triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15.run(buf123, buf112, buf115, buf118, buf121, buf116, buf117, buf124, 557056, grid=grid(557056), stream=stream0)
del buf112
buf125 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_59], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_16.run(buf124, buf125, 591872, grid=grid(591872), stream=stream0)
del buf124
# Topologically Sorted Source Nodes: [x_60], Original ATen: [aten.convolution]
buf126 = extern_kernels.convolution(buf125, primals_54, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf127 = buf126; del buf126 # reuse
buf130 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32)
buf131 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf133 = reinterpret_tensor(buf131, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf131 # reuse
# Topologically Sorted Source Nodes: [x_60, x_61], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_17.run(buf127, buf133, primals_55, buf130, 256, 1024, grid=grid(256), stream=stream0)
del primals_55
buf128 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_56, buf128, 256, grid=grid(256), stream=stream0)
del primals_56
buf129 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_57, buf129, 256, grid=grid(256), stream=stream0)
del primals_57
buf134 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.arange]
triton_poi_fused_arange_18.run(buf134, 64, grid=grid(64), stream=stream0)
buf135 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_19.run(buf135, 64, grid=grid(64), stream=stream0)
buf136 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_62, x_63, x_64, x_65], Original ATen: [aten.relu, aten.reflection_pad2d, aten._unsafe_index]
triton_poi_fused__unsafe_index_reflection_pad2d_relu_20.run(buf135, buf127, buf130, buf133, buf128, buf129, buf136, 1115136, grid=grid(1115136), stream=stream0)
# Topologically Sorted Source Nodes: [x_66], Original ATen: [aten.convolution]
buf137 = extern_kernels.convolution(buf136, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf137, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf138 = buf137; del buf137 # reuse
buf141 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf142 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf144 = reinterpret_tensor(buf142, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf142 # reuse
# Topologically Sorted Source Nodes: [x_66, x_67], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_21.run(buf138, buf144, primals_59, buf141, 128, 4096, grid=grid(128), stream=stream0)
del primals_59
buf139 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_67], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_60, buf139, 128, grid=grid(128), stream=stream0)
del primals_60
buf140 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_67], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_61, buf140, 128, grid=grid(128), stream=stream0)
del primals_61
buf145 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_68, x_69], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_22.run(buf138, buf141, buf144, buf139, buf140, buf145, 663552, grid=grid(663552), stream=stream0)
# Topologically Sorted Source Nodes: [x_70], Original ATen: [aten.convolution]
buf146 = extern_kernels.convolution(buf145, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf146, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf148 = empty_strided_cuda((12, ), (1, ), torch.float32)
buf147 = buf146; del buf146 # reuse
buf149 = empty_strided_cuda((1, 12, 1, 1), (12, 1, 12, 12), torch.float32)
buf153 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
buf152 = empty_strided_cuda((1, 12, 1, 1), (12, 1, 12, 12), torch.float32)
# Topologically Sorted Source Nodes: [x_70, x_71, x_72], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.sigmoid]
triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23.run(buf147, primals_64, primals_63, primals_65, buf148, buf149, buf153, buf152, 12, 4096, grid=grid(12), stream=stream0)
del primals_63
del primals_64
del primals_65
return (buf153, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, primals_34, primals_38, primals_42, primals_46, primals_50, primals_54, primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22, buf23, buf26, buf27, buf29, buf30, buf31, buf32, buf35, buf37, buf39, buf40, buf41, buf42, buf45, buf46, buf48, buf49, buf50, buf51, buf54, buf56, buf58, buf59, buf60, buf61, buf64, buf65, buf67, buf68, buf69, buf70, buf73, buf75, buf77, buf78, buf79, buf80, buf83, buf84, buf86, buf87, buf88, buf89, buf92, buf94, buf96, buf97, buf98, buf99, buf102, buf103, buf105, buf106, buf107, buf108, buf111, buf113, buf115, buf116, buf117, buf118, buf121, buf122, buf123, buf125, buf127, buf128, buf129, buf130, buf133, buf134, buf135, buf136, buf138, buf139, buf140, buf141, buf144, buf145, buf147, buf148, reinterpret_tensor(buf152, (12, ), (1, ), 0), buf153, reinterpret_tensor(buf149, (1, 12, 1, 1), (12, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((3, 32, 9, 9), (2592, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlock(nn.Module):
    def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
        super(ResidualBlock, self).__init__()
        # The constructor arguments are accepted but unused: the block
        # hard-codes 128-channel 3x3 stride-1 convolutions, which is what the
        # generated kernels above assume.
        self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1, 1, 1), nn.ReLU())
        self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1, 1, 1), nn.ReLU())
    def forward(self, x):
        x = self._conv_1(x)
        x = x + self._conv_2(x)
        return x
class UpsampleConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 scale_factor):
        super(UpsampleConvLayer, self).__init__()
        self._scale_factor = scale_factor
        self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
        self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
    def forward(self, x):
        x = nn.functional.interpolate(x, mode='nearest',
                                      scale_factor=self._scale_factor)
        x = self._reflection_pad(x)
        x = self._conv(x)
        return x
class TransferNet(nn.Module):
    def __init__(self):
        super(TransferNet, self).__init__()
        self._conv_1 = GenericLayer(nn.Conv2d(3, 32, 9, 1), 32, (5, 5, 5, 5), nn.ReLU())
        self._conv_2 = GenericLayer(nn.Conv2d(32, 64, 3, 2), 64, (1, 0, 1, 0), nn.ReLU())
        self._conv_3 = GenericLayer(nn.Conv2d(64, 128, 3, 2), 128, (1, 0, 1, 0), nn.ReLU())
        self._res_1 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
        self._res_2 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
        self._res_3 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
        self._res_4 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
        self._res_5 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
        self._conv_4 = GenericLayer(UpsampleConvLayer(128, 64, 3, 1, 2), 64, (0, 0, 0, 0), nn.ReLU())
        self._conv_5 = GenericLayer(UpsampleConvLayer(64, 32, 3, 1, 2), 32, (0, 0, 0, 0), nn.ReLU())
        self._conv_6 = GenericLayer(nn.Conv2d(32, 3, 9, 1), 3, (4, 4, 4, 4), nn.Sigmoid())
def forward(self, x):
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._res_1(x)
x = self._res_2(x)
x = self._res_3(x)
x = self._res_4(x)
x = self._res_5(x)
x = self._conv_4(x)
x = self._conv_5(x)
x = self._conv_6(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
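# Usage sketch (assumption): exercising the eager module directly.
#     model = TransferNet()
#     out = model(get_inputs()[0])  # -> torch.Size([4, 3, 64, 64]), values in (0, 1)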
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 65712
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 74
x1 = xindex // 74 % 74
x2 = xindex // 5476
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-5 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-5 + x1)) + 4096 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
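# Index-arithmetic sketch (assumption): for a 64x64 input plane padded by 5 on
# every side, the load offset above reduces to reflected source coordinates
#     src = 63 - abs(63 - abs(x - 5))  # maps padded x in [0, 73] into [0, 63]
# applied per row and per column, so this kernel matches nn.ReflectionPad2d(5).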
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4356
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4356 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 4356 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
    tmp6_tmp[:, None]  # Welford weight output, unused for this reduction
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 4356.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
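# Algorithm sketch (assumption): triton_helpers.welford_reduce implements
# Welford's single-pass mean/variance update; per new element x it behaves like
#     weight += 1
#     delta = x - mean
#     mean += delta / weight
#     m2 += delta * (x - mean)
# and triton_helpers.welford then merges the per-lane partials, so
# var = m2 / n and libdevice.rsqrt(var + 1e-05) is InstanceNorm's scale factor.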
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 32, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 574592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 67
x1 = xindex // 67 % 67
x2 = xindex // 4489
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4355 + -1 * tl_math.abs(-65 + tl_math.abs(-1 +
x0)) + -66 * tl_math.abs(-65 + tl_math.abs(-1 + x1)) + 4356 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_4(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 1089
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 64
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 1089 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 1089 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
    tmp6_tmp[:, None]  # Welford weight output, unused for this reduction
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 1089.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
@triton.jit
def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x2 = xindex // 1156
x3 = xindex
tmp0 = tl.load(in_ptr0 + (1088 + -1 * tl_math.abs(-32 + tl_math.abs(-1 +
x0)) + -33 * tl_math.abs(-32 + tl_math.abs(-1 + x1)) + 1089 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_7(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_repeat_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x2 = xindex // 324
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0 % 128, None, eviction_policy='evict_last')
tmp2 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None)
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = tl.broadcast_to(tmp5, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.full([1], 256, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp5 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp4 - tmp12
tmp24 = tmp23 * tmp22
tmp25 = tmp24 * tmp0
tmp26 = tmp25 + tmp1
tmp27 = tl.full([1], 0, tl.int32)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tl.store(out_ptr0 + x0, tmp0, None)
tl.store(out_ptr1 + x0, tmp1, None)
tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp4, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp22, None)
tl.store(out_ptr3 + (r3 + 256 * x0), tmp28, None)
tl.store(out_ptr2 + x0, tmp12, None)
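# Equivalence sketch (assumption): each of the 4 * 128 = 512 programs normalizes
# one (n, c) plane of 256 elements, computing
#     y = relu((x - mean) * rsqrt(var + 1e-05) * gamma + beta)
# i.e. affine InstanceNorm followed by ReLU, with gamma/beta (in_ptr0/in_ptr1)
# repeated across the batch by the fused aten.repeat.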
@triton.jit
def triton_poi_fused_reflection_pad2d_11(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x2 = xindex // 324
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused_add_reflection_pad2d_relu_12(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x2 = xindex // 324
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp0 + tmp11
tl.store(out_ptr0 + x3, tmp12, None)
@triton.jit
def triton_poi_fused_arange_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
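# Equivalence sketch (assumption): this kernel materializes the source-index map
# for nearest-neighbor upsampling with scale_factor=2; it matches
#     torch.arange(32, dtype=torch.float32).mul(0.5).to(torch.int32)
# which yields [0, 0, 1, 1, ..., 15, 15], so output pixel i reads input pixel i // 2.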
@triton.jit
def triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 34
x0 = xindex % 32
x2 = xindex // 1088
x4 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr6 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (255 + -1 * tl_math.abs(-15 + tmp8) + -16 *
tl_math.abs(-15 + tmp4) + 256 * x2), None, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + (255 + -1 * tl_math.abs(-15 + tmp8) + -16 *
tl_math.abs(-15 + tmp4) + 256 * x2), None, eviction_policy='evict_last'
)
tmp12 = tmp10 - tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp21 = tmp9 + tmp20
tl.store(out_ptr0 + x4, tmp21, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_16(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 34
x1 = xindex // 34
x2 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + 32 * x1), None, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_17(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_arange_18(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_19(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_relu_20(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 66 % 66
x0 = xindex % 66
x2 = xindex // 4356
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (1023 + -1 * tl_math.abs(-31 + tmp8) + -32 *
tl_math.abs(-31 + tmp4) + 1024 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tl.store(out_ptr0 + x5, tmp19, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_21(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6_tmp[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 4096.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
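# The loop above is Welford's streaming variance: each RBLOCK tile folds into
# running (mean, m2, weight) accumulators that are merged once afterwards. A
# hedged NumPy sketch of the per-(sample, channel) statistics it yields:
#
#   import numpy as np
#   x = np.random.rand(4096).astype(np.float32)   # one 64x64 activation map
#   mean = x.mean()
#   inv_std = 1.0 / np.sqrt(x.var() + 1e-05)      # == libdevice.rsqrt(m2/N + eps)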
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_22(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 72
x1 = xindex // 72 % 72
x2 = xindex // 5184
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3,
out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 12
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 3, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
x1 = xindex % 3
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = (triton_helpers.
welford_reduce(tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0)
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + 4096 * x0), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(tmp5_mean,
tmp5_m2, tmp5_weight, 1)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7_tmp[:, None]
tl.store(out_ptr1 + x0, tmp5, xmask)
tmp17 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp8 = tl.load(in_out_ptr0 + (r3 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 4096.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp19 = tl.sigmoid(tmp18)
tl.store(out_ptr3 + (r3 + 4096 * x0), tmp19, rmask & xmask)
tmp20 = 4096.0
tmp21 = tmp6 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tl.store(out_ptr4 + x0, tmp24, xmask)
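# Note the two r-loops above: the first pass accumulates Welford statistics
# for the final instance norm, the second re-reads the convolution output and
# applies (x - mean) * rsqrt(var + eps) * weight + bias before the sigmoid.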
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64,), (1,))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128,), (1,))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128,), (1,))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (128,), (1,))
assert_size_stride(primals_21, (128,), (1,))
assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128,), (1,))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128,), (1,))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (128,), (1,))
assert_size_stride(primals_33, (128,), (1,))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128,), (1,))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128,), (1,))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (128,), (1,))
assert_size_stride(primals_44, (128,), (1,))
assert_size_stride(primals_45, (128,), (1,))
assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128,), (1,))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128,), (1,))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64,), (1,))
assert_size_stride(primals_56, (64,), (1,))
assert_size_stride(primals_57, (64,), (1,))
assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (32,), (1,))
assert_size_stride(primals_60, (32,), (1,))
assert_size_stride(primals_61, (32,), (1,))
assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1))
assert_size_stride(primals_63, (3,), (1,))
assert_size_stride(primals_64, (3,), (1,))
assert_size_stride(primals_65, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 74, 74), (16428, 5476, 74, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(65712)](primals_1, buf0,
65712, XBLOCK=512, num_warps=8, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 66, 66), (139392, 4356, 66, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32
)
buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch
.float32)
buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf6
triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2
, buf8, primals_3, buf5, 128, 4356, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 32, 67, 67), (143648, 4489, 67, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(574592)](buf2, buf5,
buf8, buf3, buf4, buf9, 574592, XBLOCK=1024, num_warps=4,
num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf11 = buf10
del buf10
buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.
float32)
buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0)
del buf15
triton_red_fused__native_batch_norm_legit_convolution_4[grid(256)](
buf11, buf17, primals_7, buf14, 256, 1089, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_8, buf12, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_8
buf13 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_9, buf13, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_6[grid(295936)](buf11, buf14,
buf17, buf12, buf13, buf18, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = buf19
del buf19
buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf24
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf20, buf26, primals_11, buf23, 512, 256, num_warps=2,
num_stages=1)
del primals_11
buf21 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_12, buf21, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_12
buf22 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_13, buf22, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_13
buf27 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_9[grid(165888)](buf20, buf23,
buf26, buf21, buf22, buf27, 165888, XBLOCK=512, num_warps=8,
num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 16, 16), (32768, 256, 16, 1))
buf30 = empty_strided_cuda((512,), (1,), torch.float32)
buf31 = empty_strided_cuda((512,), (1,), torch.float32)
buf29 = buf28
del buf28
buf32 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf35 = reinterpret_tensor(buf33, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf33
buf36 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10[
grid(512)](buf29, buf35, primals_16, primals_17, primals_15,
buf30, buf31, buf32, buf36, 512, 256, num_warps=2, num_stages=1)
del primals_15
del primals_16
del primals_17
buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_11[grid(165888)](buf36, buf37,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1))
buf39 = buf38
del buf38
buf42 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf43 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf45 = reinterpret_tensor(buf43, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf43
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf39, buf45, primals_19, buf42, 512, 256, num_warps=2,
num_stages=1)
del primals_19
buf40 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_20, buf40, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_20
buf41 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_21, buf41, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_21
buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_add_reflection_pad2d_relu_12[grid(165888)](buf36,
buf39, buf42, buf45, buf40, buf41, buf46, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1))
buf49 = empty_strided_cuda((512,), (1,), torch.float32)
buf50 = empty_strided_cuda((512,), (1,), torch.float32)
buf48 = buf47
del buf47
buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf52
buf55 = buf36
del buf36
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10[
grid(512)](buf48, buf54, primals_24, primals_25, primals_23,
buf49, buf50, buf51, buf55, 512, 256, num_warps=2, num_stages=1)
del primals_23
del primals_24
del primals_25
buf56 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_11[grid(165888)](buf55, buf56,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf57 = extern_kernels.convolution(buf56, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 16, 16), (32768, 256, 16, 1))
buf58 = buf57
del buf57
buf61 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf64 = reinterpret_tensor(buf62, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf62
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf58, buf64, primals_27, buf61, 512, 256, num_warps=2,
num_stages=1)
del primals_27
buf59 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_28, buf59, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_28
buf60 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_29, buf60, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_29
buf65 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_add_reflection_pad2d_relu_12[grid(165888)](buf55,
buf58, buf61, buf64, buf59, buf60, buf65, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf66 = extern_kernels.convolution(buf65, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 16, 16), (32768, 256, 16, 1))
buf68 = empty_strided_cuda((512,), (1,), torch.float32)
buf69 = empty_strided_cuda((512,), (1,), torch.float32)
buf67 = buf66
del buf66
buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf71 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf73 = reinterpret_tensor(buf71, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf71
buf74 = buf55
del buf55
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10[
grid(512)](buf67, buf73, primals_32, primals_33, primals_31,
buf68, buf69, buf70, buf74, 512, 256, num_warps=2, num_stages=1)
del primals_31
del primals_32
del primals_33
buf75 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_11[grid(165888)](buf74, buf75,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf76 = extern_kernels.convolution(buf75, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 16, 16), (32768, 256, 16, 1))
buf77 = buf76
del buf76
buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf81 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf83 = reinterpret_tensor(buf81, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf81
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf77, buf83, primals_35, buf80, 512, 256, num_warps=2,
num_stages=1)
del primals_35
buf78 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_36, buf78, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_36
buf79 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_37, buf79, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_37
buf84 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_add_reflection_pad2d_relu_12[grid(165888)](buf74,
buf77, buf80, buf83, buf78, buf79, buf84, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf85 = extern_kernels.convolution(buf84, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((512,), (1,), torch.float32)
buf88 = empty_strided_cuda((512,), (1,), torch.float32)
buf86 = buf85
del buf85
buf89 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf90 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf92 = reinterpret_tensor(buf90, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf90
buf93 = buf74
del buf74
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10[
grid(512)](buf86, buf92, primals_40, primals_41, primals_39,
buf87, buf88, buf89, buf93, 512, 256, num_warps=2, num_stages=1)
del primals_39
del primals_40
del primals_41
buf94 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_11[grid(165888)](buf93, buf94,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf95 = extern_kernels.convolution(buf94, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 16, 16), (32768, 256, 16, 1))
buf96 = buf95
del buf95
buf99 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf100 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf102 = reinterpret_tensor(buf100, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf100
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf96, buf102, primals_43, buf99, 512, 256, num_warps=2,
num_stages=1)
del primals_43
buf97 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_44, buf97, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_44
buf98 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_45, buf98, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_45
buf103 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_add_reflection_pad2d_relu_12[grid(165888)](buf93,
buf96, buf99, buf102, buf97, buf98, buf103, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf104 = extern_kernels.convolution(buf103, primals_46, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf104, (4, 128, 16, 16), (32768, 256, 16, 1))
buf106 = empty_strided_cuda((512,), (1,), torch.float32)
buf107 = empty_strided_cuda((512,), (1,), torch.float32)
buf105 = buf104
del buf104
buf108 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf109 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf111 = reinterpret_tensor(buf109, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf109
buf112 = buf93
del buf93
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_10[
grid(512)](buf105, buf111, primals_48, primals_49, primals_47,
buf106, buf107, buf108, buf112, 512, 256, num_warps=2, num_stages=1
)
del primals_47
del primals_48
del primals_49
buf113 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_11[grid(165888)](buf112, buf113,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf114 = extern_kernels.convolution(buf113, primals_50, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf114, (4, 128, 16, 16), (32768, 256, 16, 1))
buf115 = buf114
del buf114
buf118 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf119 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf121 = reinterpret_tensor(buf119, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf119
triton_per_fused__native_batch_norm_legit_convolution_7[grid(512)](
buf115, buf121, primals_51, buf118, 512, 256, num_warps=2,
num_stages=1)
del primals_51
buf116 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_52, buf116, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_52
buf117 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_8[grid(512)](primals_53, buf117, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_53
buf122 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_arange_13[grid(32)](buf122, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf123 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_14[grid(32)](buf123, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf124 = empty_strided_cuda((4, 128, 34, 32), (139264, 1088, 32, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_reflection_pad2d_relu_15[grid(
557056)](buf123, buf112, buf115, buf118, buf121, buf116, buf117,
buf124, 557056, XBLOCK=1024, num_warps=4, num_stages=1)
del buf112
buf125 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_16[grid(591872)](buf124, buf125,
591872, XBLOCK=512, num_warps=8, num_stages=1)
del buf124
buf126 = extern_kernels.convolution(buf125, primals_54, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf127 = buf126
del buf126
buf130 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.
float32)
buf131 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf133 = reinterpret_tensor(buf131, (1, 256, 1, 1), (256, 1, 1, 1), 0)
del buf131
triton_per_fused__native_batch_norm_legit_convolution_17[grid(256)](
buf127, buf133, primals_55, buf130, 256, 1024, num_warps=8,
num_stages=1)
del primals_55
buf128 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_56, buf128, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_56
buf129 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_57, buf129, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_57
buf134 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_arange_18[grid(64)](buf134, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf135 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_19[grid(64)](buf135, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf136 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1),
torch.float32)
triton_poi_fused__unsafe_index_reflection_pad2d_relu_20[grid(1115136)](
buf135, buf127, buf130, buf133, buf128, buf129, buf136, 1115136,
XBLOCK=512, num_warps=8, num_stages=1)
buf137 = extern_kernels.convolution(buf136, primals_58, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf137, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf138 = buf137
del buf137
buf141 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf142 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf144 = reinterpret_tensor(buf142, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf142
triton_red_fused__native_batch_norm_legit_convolution_21[grid(128)](
buf138, buf144, primals_59, buf141, 128, 4096, XBLOCK=1, RBLOCK
=2048, num_warps=16, num_stages=1)
del primals_59
buf139 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_60, buf139, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_60
buf140 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_61, buf140, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_61
buf145 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_22[grid(663552)](buf138,
buf141, buf144, buf139, buf140, buf145, 663552, XBLOCK=512,
num_warps=8, num_stages=1)
buf146 = extern_kernels.convolution(buf145, primals_62, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf146, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf148 = empty_strided_cuda((12,), (1,), torch.float32)
buf147 = buf146
del buf146
buf149 = empty_strided_cuda((1, 12, 1, 1), (12, 1, 12, 12), torch.
float32)
buf153 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf152 = empty_strided_cuda((1, 12, 1, 1), (12, 1, 12, 12), torch.
float32)
triton_red_fused__native_batch_norm_legit_convolution_repeat_sigmoid_23[
grid(12)](buf147, primals_64, primals_63, primals_65, buf148,
buf149, buf153, buf152, 12, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del primals_63
del primals_64
del primals_65
return (buf153, primals_2, primals_6, primals_10, primals_14,
primals_18, primals_22, primals_26, primals_30, primals_34,
primals_38, primals_42, primals_46, primals_50, primals_54,
primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9,
buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22,
buf23, buf26, buf27, buf29, buf30, buf31, buf32, buf35, buf37,
buf39, buf40, buf41, buf42, buf45, buf46, buf48, buf49, buf50,
buf51, buf54, buf56, buf58, buf59, buf60, buf61, buf64, buf65,
buf67, buf68, buf69, buf70, buf73, buf75, buf77, buf78, buf79,
buf80, buf83, buf84, buf86, buf87, buf88, buf89, buf92, buf94,
buf96, buf97, buf98, buf99, buf102, buf103, buf105, buf106, buf107,
buf108, buf111, buf113, buf115, buf116, buf117, buf118, buf121,
buf122, buf123, buf125, buf127, buf128, buf129, buf130, buf133,
buf134, buf135, buf136, buf138, buf139, buf140, buf141, buf144,
buf145, buf147, buf148, reinterpret_tensor(buf152, (12,), (1,), 0),
buf153, reinterpret_tensor(buf149, (1, 12, 1, 1), (12, 1, 1, 1), 0))
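# In the tuple above, buf153 (the sigmoid output) is the network result; the
# remaining tensors are intermediates the compiled graph keeps alive,
# presumably for reuse in the backward pass.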
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super(GenericLayer, self).__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
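# GenericLayer runs ReflectionPad2d -> wrapped layer -> affine InstanceNorm2d
# -> optional activation; the triton_poi_fused_repeat_* kernels above tile the
# norm's per-channel weight/bias across the batch dimension.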
class ResidualBlock(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super(ResidualBlock, self).__init__()
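        # channels/kernel_size/stride/padding are accepted but unused: both
        # convolutions below are hard-coded as 128-channel 3x3, stride 1.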
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, x):
x = self._conv_1(x)
x = x + self._conv_2(x)
return x
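# Note the residual wiring: the output is conv_1(x) + conv_2(conv_1(x)), i.e.
# the skip connection starts after the first convolution, which is what
# triton_poi_fused_add_reflection_pad2d_relu_12 fuses above.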
class UpsampleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super(UpsampleConvLayer, self).__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
        x = nn.functional.interpolate(x, mode='nearest', scale_factor=self._scale_factor)
x = self._reflection_pad(x)
x = self._conv(x)
return x
class TransferNetNew(nn.Module):
def __init__(self):
super(TransferNetNew, self).__init__()
self._conv_1 = GenericLayer(nn.Conv2d(3, 32, 9, 1), 32, (5, 5, 5, 5
), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(32, 64, 3, 2), 64, (1, 0, 1,
0), nn.ReLU())
self._conv_3 = GenericLayer(nn.Conv2d(64, 128, 3, 2), 128, (1, 0, 1,
0), nn.ReLU())
self._res_1 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_2 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_3 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_4 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_5 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._conv_4 = GenericLayer(UpsampleConvLayer(128, 64, 3, 1, 2), 64,
(0, 0, 0, 0), nn.ReLU())
self._conv_5 = GenericLayer(UpsampleConvLayer(64, 32, 3, 1, 2), 32,
(0, 0, 0, 0), nn.ReLU())
self._conv_6 = GenericLayer(nn.Conv2d(32, 3, 9, 1), 3, (4, 4, 4, 4),
nn.Sigmoid())
def forward(self, input_0):
primals_2 = self._conv_1._layer.weight
primals_3 = self._conv_1._layer.bias
primals_4 = self._conv_1._norm.weight
primals_5 = self._conv_1._norm.bias
primals_6 = self._conv_2._layer.weight
primals_7 = self._conv_2._layer.bias
primals_8 = self._conv_2._norm.weight
primals_9 = self._conv_2._norm.bias
primals_10 = self._conv_3._layer.weight
primals_11 = self._conv_3._layer.bias
primals_12 = self._conv_3._norm.weight
primals_13 = self._conv_3._norm.bias
primals_14 = self._res_1._conv_1._layer.weight
primals_15 = self._res_1._conv_1._layer.bias
primals_16 = self._res_1._conv_1._norm.weight
primals_17 = self._res_1._conv_1._norm.bias
primals_18 = self._res_1._conv_2._layer.weight
primals_19 = self._res_1._conv_2._layer.bias
primals_20 = self._res_1._conv_2._norm.weight
primals_21 = self._res_1._conv_2._norm.bias
primals_22 = self._res_2._conv_1._layer.weight
primals_23 = self._res_2._conv_1._layer.bias
primals_24 = self._res_2._conv_1._norm.weight
primals_25 = self._res_2._conv_1._norm.bias
primals_26 = self._res_2._conv_2._layer.weight
primals_27 = self._res_2._conv_2._layer.bias
primals_28 = self._res_2._conv_2._norm.weight
primals_29 = self._res_2._conv_2._norm.bias
primals_30 = self._res_3._conv_1._layer.weight
primals_31 = self._res_3._conv_1._layer.bias
primals_32 = self._res_3._conv_1._norm.weight
primals_33 = self._res_3._conv_1._norm.bias
primals_34 = self._res_3._conv_2._layer.weight
primals_35 = self._res_3._conv_2._layer.bias
primals_36 = self._res_3._conv_2._norm.weight
primals_37 = self._res_3._conv_2._norm.bias
primals_38 = self._res_4._conv_1._layer.weight
primals_39 = self._res_4._conv_1._layer.bias
primals_40 = self._res_4._conv_1._norm.weight
primals_41 = self._res_4._conv_1._norm.bias
primals_42 = self._res_4._conv_2._layer.weight
primals_43 = self._res_4._conv_2._layer.bias
primals_44 = self._res_4._conv_2._norm.weight
primals_45 = self._res_4._conv_2._norm.bias
primals_46 = self._res_5._conv_1._layer.weight
primals_47 = self._res_5._conv_1._layer.bias
primals_48 = self._res_5._conv_1._norm.weight
primals_49 = self._res_5._conv_1._norm.bias
primals_50 = self._res_5._conv_2._layer.weight
primals_51 = self._res_5._conv_2._layer.bias
primals_52 = self._res_5._conv_2._norm.weight
primals_53 = self._res_5._conv_2._norm.bias
primals_54 = self._conv_4._layer._conv.weight
primals_55 = self._conv_4._layer._conv.bias
primals_56 = self._conv_4._norm.weight
primals_57 = self._conv_4._norm.bias
primals_58 = self._conv_5._layer._conv.weight
primals_59 = self._conv_5._layer._conv.bias
primals_60 = self._conv_5._norm.weight
primals_61 = self._conv_5._norm.bias
primals_62 = self._conv_6._layer.weight
primals_63 = self._conv_6._layer.bias
primals_64 = self._conv_6._norm.weight
primals_65 = self._conv_6._norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65])
return output[0]
| ThomasRanvier/cnn_style_transfer | TransferNet | false | 1,222 | [
"MIT"
] | 0 | 90b6c76c20263c22f4e45184d572284726ecbd7b | https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b | import torch
import torch.nn as nn
class GenericLayer(nn.Module):
def __init__(self, layer, out_channels, padding=(0, 0, 0, 0),
activation=None):
super().__init__()
self._act = activation
self._layer = layer
self._norm = nn.InstanceNorm2d(out_channels, affine=True)
self._pad = nn.ReflectionPad2d(padding)
def forward(self, x):
x = self._pad(x)
x = self._layer(x)
x = self._norm(x)
if self._act is not None:
x = self._act(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, kernel_size, stride, padding=(0, 0, 0, 0)):
super().__init__()
self._conv_1 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(128, 128, 3, 1), 128, (1, 1,
1, 1), nn.ReLU())
def forward(self, x):
x = self._conv_1(x)
x = x + self._conv_2(x)
return x
class UpsampleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super().__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
        x = nn.functional.interpolate(x, mode='nearest', scale_factor=self._scale_factor)
x = self._reflection_pad(x)
x = self._conv(x)
return x
class Model(nn.Module):
def __init__(self):
super().__init__()
self._conv_1 = GenericLayer(nn.Conv2d(3, 32, 9, 1), 32, (5, 5, 5, 5
), nn.ReLU())
self._conv_2 = GenericLayer(nn.Conv2d(32, 64, 3, 2), 64, (1, 0, 1,
0), nn.ReLU())
self._conv_3 = GenericLayer(nn.Conv2d(64, 128, 3, 2), 128, (1, 0, 1,
0), nn.ReLU())
self._res_1 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_2 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_3 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_4 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._res_5 = ResidualBlock(128, 3, 1, (1, 1, 1, 1))
self._conv_4 = GenericLayer(UpsampleConvLayer(128, 64, 3, 1, 2), 64,
(0, 0, 0, 0), nn.ReLU())
self._conv_5 = GenericLayer(UpsampleConvLayer(64, 32, 3, 1, 2), 32,
(0, 0, 0, 0), nn.ReLU())
self._conv_6 = GenericLayer(nn.Conv2d(32, 3, 9, 1), 3, (4, 4, 4, 4),
nn.Sigmoid())
def forward(self, x):
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._res_1(x)
x = self._res_2(x)
x = self._res_3(x)
x = self._res_4(x)
x = self._res_5(x)
x = self._conv_4(x)
x = self._conv_5(x)
x = self._conv_6(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
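# Hedged usage sketch (shapes follow the asserts in the compiled call above):
#
#   model = Model()
#   out = model(torch.rand(4, 3, 64, 64))
#   assert out.shape == (4, 3, 64, 64)   # sigmoid keeps values in (0, 1)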
|
MSE_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xg/cxgsfonssev53pom6rhmghpoovsnhacnw2p7ufznc2l3tkmjlft4.py
# Topologically Sorted Source Nodes: [sub, pow_1, mse_loss, sum_2, mse_loss_1], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# mse_loss => sum_1
# mse_loss_1 => div
# pow_1 => pow_1
# sub => sub
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sum_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 1), kwargs = {})
triton_per_fused_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mse_loss, sum_2, mse_loss_1], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MSE_Loss(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, x, y):
assert x.shape == y.shape
        if self.sum_dim is not None:  # sum_dim=0 is falsy, so test against None
mse_loss = torch.sum((x - y) ** 2, dim=self.sum_dim)
else:
mse_loss = torch.sum((x - y) ** 2)
if self.sqrt:
mse_loss = torch.sqrt(mse_loss)
mse_loss = torch.sum(mse_loss) / mse_loss.nelement()
if len(mse_loss.shape) > self.dimension_warn:
            raise ValueError(
                'The shape of the MSE loss should be a scalar, but you can skip this error by changing dimension_warn explicitly.'
                )
return mse_loss
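# Note: with sum_dim=None the reduction is already a scalar, so dividing by
# mse_loss.nelement() divides by 1; the loss is the *sum* of squared errors,
# matching `tmp8 = tmp6 * 1.0` in the fused kernel above.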
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSE_LossNew(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WorksApplications/omni_torch | MSE_Loss | false | 1,223 | [
"Apache-2.0"
] | 0 | 10b689d794c8f485e38c765303ef018da17bc641 | https://github.com/WorksApplications/omni_torch/tree/10b689d794c8f485e38c765303ef018da17bc641 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, x, y):
assert x.shape == y.shape
        if self.sum_dim is not None:  # sum_dim=0 is falsy, so test against None
mse_loss = torch.sum((x - y) ** 2, dim=self.sum_dim)
else:
mse_loss = torch.sum((x - y) ** 2)
if self.sqrt:
mse_loss = torch.sqrt(mse_loss)
mse_loss = torch.sum(mse_loss) / mse_loss.nelement()
if len(mse_loss.shape) > self.dimension_warn:
            raise ValueError(
                'The shape of the MSE loss should be a scalar, but you can skip this error by changing dimension_warn explicitly.'
                )
return mse_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
KL_Divergence | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/36/c36tvqtkgwhzybfsqd7qdh2wdiwvqkxzwfm3jxeqrcebr6bro6qj.py
# Topologically Sorted Source Nodes: [norm_1], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm_1 => abs_2, sum_2
# Graph fragment:
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_2, [-1]), kwargs = {})
triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hh/chh2l34diwmmt3zcli5z4lnk5ecukhm76s53o6ngb6zwtkqn5huy.py
# Topologically Sorted Source Nodes: [y_1, log, x_1, log_1, sub, mul, loss], Original ATen: [aten.div, aten.log, aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => sum_3
# mul => mul
# sub => sub
# x_1 => div
# y_1 => div_1
# Graph fragment:
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %unsqueeze_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %unsqueeze), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sub), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, None), kwargs = {})
triton_per_fused_div_log_mul_sub_sum_1 = async_compile.triton('triton_per_fused_div_log_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_log_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_log_mul_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (r2), None)
tmp5 = tl.load(in_ptr3 + (r1), None, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = tmp4 / tmp5
tmp7 = tl_math.log(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0.run(arg1_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
triton_per_fused_linalg_vector_norm_0.run(arg0_1, buf1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [y_1, log, x_1, log_1, sub, mul, loss], Original ATen: [aten.div, aten.log, aten.sub, aten.mul, aten.sum]
triton_per_fused_div_log_mul_sub_sum_1.run(arg1_1, buf0, arg0_1, buf1, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class KL_Divergence(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, x, y):
x = x.view(x.size(0), x.size(1), -1)
x = x / x.norm(1, dim=-1).unsqueeze(-1)
y = y.view(y.size(0), y.size(1), -1)
y = y / y.norm(1, dim=-1).unsqueeze(-1)
loss = torch.sum(y * (y.log() - x.log()), dim=self.sum_dim)
return loss.squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_div_log_mul_sub_sum_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + r2, None)
tmp5 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = tmp4 / tmp5
tmp7 = tl_math.log(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(16)](arg1_1, buf0, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_linalg_vector_norm_0[grid(16)](arg0_1, buf1, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_div_log_mul_sub_sum_1[grid(1)](arg1_1, buf0,
arg0_1, buf1, buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf0
del buf1
return buf2,
class KL_DivergenceNew(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WorksApplications/omni_torch | KL_Divergence | false | 1,224 | ["Apache-2.0"] | 0 | 10b689d794c8f485e38c765303ef018da17bc641 | https://github.com/WorksApplications/omni_torch/tree/10b689d794c8f485e38c765303ef018da17bc641 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, sum_dim=None, sqrt=False, dimension_warn=0):
super().__init__()
self.sum_dim = sum_dim
self.sqrt = sqrt
self.dimension_warn = dimension_warn
def forward(self, x, y):
x = x.view(x.size(0), x.size(1), -1)
x = x / x.norm(1, dim=-1).unsqueeze(-1)
y = y.view(y.size(0), y.size(1), -1)
y = y / y.norm(1, dim=-1).unsqueeze(-1)
loss = torch.sum(y * (y.log() - x.log()), dim=self.sum_dim)
return loss.squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
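The KL_Divergence record above fuses a per-row L1-norm reduction with the pointwise y * (log y - log x) sum. Below is a minimal eager-mode sketch of the same computation, usable as a CPU reference when checking the compiled call(); the shapes mirror get_inputs(), everything else is illustrative.

import torch

def kl_reference(x, y):
    # flatten spatial dims and L1-normalize each (batch, channel) row,
    # exactly as the module's forward does
    x = x.view(x.size(0), x.size(1), -1)
    x = x / x.norm(1, dim=-1).unsqueeze(-1)
    y = y.view(y.size(0), y.size(1), -1)
    y = y / y.norm(1, dim=-1).unsqueeze(-1)
    # KL(y || x) summed over every element (sum_dim=None sums everything)
    return torch.sum(y * (y.log() - x.log()))

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(kl_reference(x, y))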
FC_Layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ck/cckzkicqhrcfrkfdju5exjvt5jnqvtxaazswol3wnjnjike2p45t.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# input_2 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
return (buf1, primals_1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, \
        'expected %s input params, got %s' % (assert_length, len(param))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
layer_size = [input] + [layer_size] if type(layer_size) is not list else [
input] + layer_size
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
class FC_Layer(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
    def forward(self, x, batch_dim=0):
        # len(x.shape) is truthy for any non-scalar tensor, so the input is
        # always flattened to (batch, -1) before the fully connected stack
        if len(x.shape):
            x = x.view(x.size(batch_dim), -1)
        return self.fc_layer.forward(x)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'layer_size': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, buf1
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, \
        'expected %s input params, got %s' % (assert_length, len(param))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
layer_size = [input] + [layer_size] if type(layer_size) is not list else [
input] + layer_size
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
class FC_LayerNew(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
def forward(self, input_0):
primals_2 = self.fc_layer._fc_0.weight
primals_3 = self.fc_layer._fc_0.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| WorksApplications/omni_torch | FC_Layer | false | 1,225 | ["Apache-2.0"] | 0 | 10b689d794c8f485e38c765303ef018da17bc641 | https://github.com/WorksApplications/omni_torch/tree/10b689d794c8f485e38c765303ef018da17bc641 | import torch
import torch.nn as nn
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, \
        'expected %s input params, got %s' % (assert_length, len(param))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
layer_size = [input] + [layer_size] if type(layer_size) is not list else [
input] + layer_size
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
class Model(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
    def forward(self, x, batch_dim=0):
        # len(x.shape) is truthy for any non-scalar tensor, so the input is
        # always flattened to (batch, -1) before the fully connected stack
        if len(x.shape):
            x = x.view(x.size(batch_dim), -1)
        return self.fc_layer.forward(x)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 1]
|
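The fc_layer builder in the FC_Layer record broadcasts scalar options across every layer through standardize. A self-contained sketch of that broadcasting rule (the printed values are illustrative):

def standardize(param, assert_length):
    # scalars are repeated once per layer; lists must already match the layer count
    if not isinstance(param, (list, tuple)):
        param = [param] * assert_length
    assert len(param) == assert_length
    return param

print(standardize(True, 3))        # -> [True, True, True]
print(standardize([0.1, 0.2], 2))  # -> [0.1, 0.2], left unchanged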
JS_Divergence | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/sb/csbrhoo2n4y5iym2xea6cnh5zhokxttxk4s4a5zwcxw52tq3eveo.py
# Topologically Sorted Source Nodes: [kl_div, kl_div_1, add], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean, aten.add]
# Source node to ATen node mapping:
# add => add
# kl_div => eq, full_default, full_default_1, isnan, log, mean, mul, mul_1, sub, where, where_1
# kl_div_1 => eq_1, full_default_2, full_default_3, isnan_1, log_1, mean_1, mul_2, mul_3, sub_1, where_2, where_3
# Graph fragment:
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%arg0_1,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%arg1_1,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %log_1), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
triton_per_fused_add_mean_mul_sub_xlogy_0 = async_compile.triton('triton_per_fused_add_mean_mul_sub_xlogy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_sub_xlogy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float("nan")
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = libdevice.isnan(tmp9).to(tl.int1)
tmp16 = tmp9 == tmp2
tmp17 = tl_math.log(tmp9)
tmp18 = tmp9 * tmp17
tmp19 = tl.where(tmp16, tmp2, tmp18)
tmp20 = tl.where(tmp15, tmp7, tmp19)
tmp21 = tmp9 * tmp0
tmp22 = tmp20 - tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp14 / tmp26
tmp28 = tmp25 / tmp26
tmp29 = tmp27 + tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [kl_div, kl_div_1, add], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_sub_xlogy_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class JS_Divergence(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, x, y):
return self.engine(x, y) + self.engine(y, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float('nan')
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = libdevice.isnan(tmp9).to(tl.int1)
tmp16 = tmp9 == tmp2
tmp17 = tl_math.log(tmp9)
tmp18 = tmp9 * tmp17
tmp19 = tl.where(tmp16, tmp2, tmp18)
tmp20 = tl.where(tmp15, tmp7, tmp19)
tmp21 = tmp9 * tmp0
tmp22 = tmp20 - tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp14 / tmp26
tmp28 = tmp25 / tmp26
tmp29 = tmp27 + tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_sub_xlogy_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class JS_DivergenceNew(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WorksApplications/omni_torch | JS_Divergence | false | 1,226 | ["Apache-2.0"] | 0 | 10b689d794c8f485e38c765303ef018da17bc641 | https://github.com/WorksApplications/omni_torch/tree/10b689d794c8f485e38c765303ef018da17bc641 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, x, y):
return self.engine(x, y) + self.engine(y, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
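One caveat on the JS_Divergence record above: nn.KLDivLoss expects its first argument to be log-probabilities, but the module feeds raw tensors to both calls, so the result is a symmetric KL-style penalty rather than a textbook Jensen-Shannon divergence (which would compare each input against the mixture (x + y) / 2). A short eager sketch of what the module actually computes:

import torch
import torch.nn as nn

kl = nn.KLDivLoss()  # default 'mean' reduction, matching the module
x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
# symmetric sum, exactly as JS_Divergence.forward does; x and y are raw
# values here even though KLDivLoss documents log-probability inputs
loss = kl(x, y) + kl(y, x)
print(loss)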
ContextGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [context_input], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# context_input => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yi/cyi6r7vs4j3l42jso4lbypleykq4kzejvirygvgggapmeah5esms.py
# Topologically Sorted Source Nodes: [context_gate, mul, sub, mul_1, context_fusion, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.tanh]
# Source node to ATen node mapping:
# context_fusion => add
# context_gate => sigmoid
# mul => mul
# mul_1 => mul_1
# sub => sub
# tanh => tanh
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = libdevice.tanh(tmp8)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_input], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_gate, mul, sub, mul_1, context_fusion, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.tanh]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf1, primals_1, primals_2, buf2, 16, grid=grid(16), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class ContextGate(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, vector_dim)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, source_vector, other_vector):
context_input = torch.cat((source_vector, other_vector), dim=1)
context_gate = self.sigmoid(self.fusion_linear(context_input))
context_fusion = context_gate * source_vector + (1.0 - context_gate
) * other_vector
return self.tanh(context_fusion)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'vector_dim': 4, 'topic_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.multiprocessing
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = libdevice.tanh(tmp8)
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(16)](buf1,
primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1, buf2
class ContextGateNew(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, vector_dim)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, input_0, input_1):
primals_3 = self.fusion_linear.weight
primals_4 = self.fusion_linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| WuDiDaBinGe/TAKG | ContextGate | false | 1,227 | ["MIT"] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, vector_dim)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, source_vector, other_vector):
context_input = torch.cat((source_vector, other_vector), dim=1)
context_gate = self.sigmoid(self.fusion_linear(context_input))
context_fusion = context_gate * source_vector + (1.0 - context_gate
) * other_vector
return self.tanh(context_fusion)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
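The fused kernel in the ContextGate record implements a per-element convex blend of the two input vectors, gated by a sigmoid over their concatenation. A minimal eager sketch of that gating math (the random gate stands in for the fusion_linear output):

import torch

gate = torch.sigmoid(torch.randn(4, 4))  # in the module this comes from fusion_linear
source = torch.rand(4, 4)
other = torch.rand(4, 4)
# gate weights the source vector, (1 - gate) weights the other; tanh squashes the blend
fused = torch.tanh(gate * source + (1.0 - gate) * other)
print(fused.shape)  # torch.Size([4, 4])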
ClassicMixtureDensityModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pf/cpf6ns5fy7h5k6xkbfznbtvldnujjbndm4h6qzyudnvbwyxdbyht.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 144
x2 = (xindex // 576)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (144 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (288 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (432 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xv/cxvwzv7zvqvjztvnbdecf4kizdhqkixtkyjgymln4ihenkaudb4p.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 144
x2 = (xindex // 576)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (144 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (288 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (432 + x0 + (576*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (36, 4), (4, 1))
assert_size_stride(primals_2, (36, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 36), (36, 1), torch.float32)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 36), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.float32)
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 2304, grid=grid(2304), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.float32)
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 2304, grid=grid(2304), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 0, 4, 36), (0, 144, 36, 1), torch.float32)
return (buf2, reinterpret_tensor(buf0, (0, 4, 4), (0, 0, 0), 576), reinterpret_tensor(buf3, (0, 4, 4), (0, 0, 0), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((36, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((36, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ClassicMixtureDensityModule(nn.Module):
def __init__(self, dim_input, dim_output, num_components):
super(ClassicMixtureDensityModule, self).__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.M = num_components
self.layer_mapping = nn.Linear(dim_input, (2 * dim_output + 1) *
num_components)
self.layer_alpha = nn.Softmax(dim=1)
def forward(self, x):
p = self.layer_mapping(x)
alpha = self.layer_alpha(p[:, :self.M])
mu = p[:, self.M:(self.dim_output + 1) * self.M]
sigma = torch.exp(p[:, (self.dim_output + 1) * self.M:])
mu = mu.view(-1, self.M, self.dim_output)
sigma = sigma.view(-1, self.M, self.dim_output)
return alpha, mu, sigma
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_input': 4, 'dim_output': 4, 'num_components': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 144
x2 = xindex // 576
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 576 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (144 + x0 + 576 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (288 + x0 + 576 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (432 + x0 + 576 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 144
x2 = xindex // 576
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 576 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (144 + x0 + 576 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (288 + x0 + 576 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (432 + x0 + 576 * x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
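# The two kernels above are a numerically stable softmax split into two passes
# over the (4, 4, 4, 36)-strided view of the linear output: the first computes
# exp(x - max) along dim=1, the second normalizes by the sum. A minimal eager
# sketch of the same computation (not part of the generated code):
def _softmax_dim1_reference(p):
    m = p.amax(dim=1, keepdim=True)        # pass 1: running max over the 4 slices
    e = torch.exp(p - m)                   # pass 1: shifted exponentials
    return e / e.sum(dim=1, keepdim=True)  # pass 2: normalize by the slice sum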
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (36, 4), (4, 1))
assert_size_stride(primals_2, (36,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 36), (36, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 36), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(2304)](buf0, buf1, 2304, XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(2304)](buf1, buf2, 2304, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
        buf3 = empty_strided_cuda((4, 0, 4, 36), (0, 144, 36, 1), torch.float32)
    return buf2, reinterpret_tensor(buf0, (0, 4, 4), (0, 0, 0), 576), reinterpret_tensor(buf3, (0, 4, 4), (0, 0, 0), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf3
class ClassicMixtureDensityModuleNew(nn.Module):
def __init__(self, dim_input, dim_output, num_components):
super(ClassicMixtureDensityModuleNew, self).__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.M = num_components
self.layer_mapping = nn.Linear(dim_input, (2 * dim_output + 1) *
num_components)
self.layer_alpha = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.layer_mapping.weight
primals_2 = self.layer_mapping.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2]
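# Shape-level usage sketch (hedged; mirrors get_inputs above). The New wrapper
# only reroutes the Linear parameters through the compiled call; the math is
# unchanged:
#
#     m = ClassicMixtureDensityModuleNew(4, 4, 4).cuda()
#     alpha, mu, sigma = m(torch.rand(4, 4, 4, 4, device='cuda'))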
| Woodenonez/MultimodalMotionPred_SamplingWTACGF_Pytorch | ClassicMixtureDensityModule | false | 1,228 | [
"MIT"
] | 0 | 2be4f8aaaebb9ec80b29d4ff86146010a0192573 | https://github.com/Woodenonez/MultimodalMotionPred_SamplingWTACGF_Pytorch/tree/2be4f8aaaebb9ec80b29d4ff86146010a0192573 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, dim_input, dim_output, num_components):
super().__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.M = num_components
self.layer_mapping = nn.Linear(dim_input, (2 * dim_output + 1) *
num_components)
self.layer_alpha = nn.Softmax(dim=1)
def forward(self, x):
p = self.layer_mapping(x)
alpha = self.layer_alpha(p[:, :self.M])
mu = p[:, self.M:(self.dim_output + 1) * self.M]
sigma = torch.exp(p[:, (self.dim_output + 1) * self.M:])
mu = mu.view(-1, self.M, self.dim_output)
sigma = sigma.view(-1, self.M, self.dim_output)
return alpha, mu, sigma
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ue/cueewzxuy4vxtpvu3vvsk7dj77iejxhffcljrvuqynhlke7j7x72.py
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
# Source node to ATen node mapping:
# attn => clone
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 2.0), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_0 = async_compile.triton('triton_poi_fused_clone_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
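# The 0.5 factor above is 1 / temperature with temperature = d_k ** 0.5 = 2.0
# for d_k = 4; the kernel fuses the head transpose (the clone) with that
# pre-scaling of q. Eager sketch, assuming q of shape (b, len_q, n_head, d_k):
#
#     q = q.transpose(1, 2)      # bring heads forward: (b, n_head, len_q, d_k)
#     q = q / (d_k ** 0.5)       # scale before q @ k.transpose(2, 3)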
# kernel path: runs/run_shard_6/inductor_cache/ri/cricgdtr5c24l63g746gjtdd45qor3pkzmi7qmyygyd24ejrijb7.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
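# Kernels 2 and 3 are the standard two-pass stable softmax over the last axis
# of the (4, 4, 4, 4) attention scores; an eager sketch (not part of the
# generated code):
def _attn_softmax_reference(scores):
    e = torch.exp(scores - scores.amax(dim=-1, keepdim=True))  # subtract row max
    return e / e.sum(dim=-1, keepdim=True)                     # normalize rows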
# kernel path: runs/run_shard_6/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jy/cjygxb2zqcocru7bqmlieffqzoyzqxczpp5w5pv3hrnjzzu6tq65.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool1d => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%unsqueeze, [-1, -2], True), kwargs = {})
triton_poi_fused_mean_5 = async_compile.triton('triton_poi_fused_mean_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp20 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp24 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp28 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
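# This kernel fuses SPA's head sum with its adaptive average pool: for x of
# shape (bs, n_head, len_q, d_v) it computes, per (batch, channel),
# s = x.sum(dim=1).mean(dim=1), which equals gap(u.transpose(1, 2)).view(bs, d_v)
# with u = x.sum(dim=1). Eager sketch:
#
#     s = x.sum(dim=1).mean(dim=1)   # (bs, n_head, len_q, d_v) -> (bs, d_v)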
# kernel path: runs/run_shard_6/inductor_cache/x3/cx357lgsa552jxzo7u6zzmeilbrh23o64hz7xr6jyzewgxsocd7h.py
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# v_4 => amax_1, exp_1, sub_1
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_16, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_16, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mu/cmuwbmg3ea5rpxshdtybg3pou76wgwsthvalkm6sfme6zednfanp.py
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# v_4 => div_2, sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {})
triton_poi_fused__softmax_7 = async_compile.triton('triton_poi_fused__softmax_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/a2/ca2efstpjdcv4e2tpybutvkyj7iakalaszbd2jsdzr4g2kbpjaka.py
# Topologically Sorted Source Nodes: [f, q_2], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# f => mul
# q_2 => sum_4
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_14, %expand_4), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
triton_poi_fused_mul_sum_8 = async_compile.triton('triton_poi_fused_mul_sum_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
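# SPA's reweighting fused into one kernel: the per-head channel weights v are
# broadcast over the sequence axis, multiplied into x, and the head axis is
# summed away. Eager sketch for x of shape (bs, n_head, len_q, d_v) and v of
# shape (bs, n_head, 1, d_v):
#
#     f = x * v.expand_as(x)
#     q = f.sum(dim=1, keepdim=True)   # collapse heads -> (bs, 1, len_q, d_v)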
# kernel path: runs/run_shard_6/inductor_cache/cd/ccdmi4jlrnqerq7lq32jasuhlfdtt4lbo4wdtgtm25hvaqmsmmzd.py
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add
# q_6 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/l2/cl2chp3c7xhcfuz6tuqw7ows2xquvgtlltaljzy5xnnjpkxrnzbj.py
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add
# q_6 => add_1, add_2, mul_1, mul_2, rsqrt, sub_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_8), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_10 = async_compile.triton('triton_poi_fused_add_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
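# Kernels 9 and 10 together realize the residual add followed by LayerNorm over
# the last axis (eps = 1e-06, biased variance, matching correction: 0 in the
# graph). A minimal eager reference (not part of the generated code):
def _layer_norm_reference(x, weight, bias, eps=1e-06):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)   # correction=0, as above
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias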
# kernel path: runs/run_shard_6/inductor_cache/x4/cx4dy7nub5axbgswcjd7xtsxm4hlex3zmjlfcyucivazkjm63wl6.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
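# The fused kernel applies bias + ReLU in place and additionally stores the
# `out <= 0` mask that aten.threshold_backward will consume to zero gradients.
# Eager sketch:
#
#     out = torch.relu(x + bias)
#     mask = out <= 0   # saved for the backward pass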
# kernel path: runs/run_shard_6/inductor_cache/ly/clych72evfl43h75debtcdhychoab7iw7f2ch2b5ebe3jjglsndg.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_23, %add_2), kwargs = {})
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/fr/cfrfshww2eair72sp7fsylxei2yqzgvvsvzcbwwxee7smq7nuolb.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_3 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_13 = async_compile.triton('triton_poi_fused_native_layer_norm_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/dr/cdrly2m2ajw4z7nsxvc4tnu6ks6b7s65jkv7p2qo565cfa7ogsb3.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_3, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_14), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_15), kwargs = {})
triton_poi_fused_native_layer_norm_14 = async_compile.triton('triton_poi_fused_native_layer_norm_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_div_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean]
triton_poi_fused_mean_5.run(buf9, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_6
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = reinterpret_tensor(buf12, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [f, q_2], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_8.run(buf9, buf13, buf14, 64, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(buf15, primals_1, buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_10.run(buf15, primals_1, buf16, buf17, primals_8, primals_9, buf18, 64, grid=grid(64), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_11.run(buf20, primals_11, buf26, 64, grid=grid(64), stream=stream0)
del primals_11
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_12.run(buf22, primals_13, buf18, 64, grid=grid(64), stream=stream0)
del primals_13
buf23 = buf17; del buf17 # reuse
buf24 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_13.run(buf22, buf23, buf24, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_14.run(buf22, buf23, buf24, primals_14, primals_15, buf25, 64, grid=grid(64), stream=stream0)
del buf23
del buf24
del primals_15
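    # buf25 is the encoder-layer output (x_3) and buf7 the attention weights;
    # the remaining tensors appear to be activations saved for the backward pass.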
return (buf25, buf7, primals_1, primals_8, primals_14, buf7, buf9, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf22, primals_12, buf26, primals_10, primals_7, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayer(nn.Module):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input,
enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
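# Fuses the q / temperature scaling (temperature = sqrt(d_k) = 2.0, hence the
# 0.5 multiply below) with the head-major permute of q ahead of the attention bmm.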
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
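# Numerically stable softmax over the attention scores, pass 1: subtract the
# per-row maximum (rows of length 4) and exponentiate.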
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
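# Softmax pass 2: normalize each row by the sum of its exponentials.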
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
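# Fused SPA pooling over buf9 viewed as (bs, n_head, len, d_v): sums the 4
# heads and averages the 4 positions, i.e. x.sum(dim=1) followed by
# AdaptiveAvgPool1d(1).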
@triton.jit
def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tl.store(out_ptr0 + x2, tmp32, xmask)
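# The next two kernels apply the SPA softmax over the head dimension of
# buf11 viewed as (bs, n_head, d_v) (head stride 4), again split into a
# max-subtract/exp pass and a sum-normalize pass.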
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
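# SPA reweighting fused with the head reduction: each head's attention output
# (in_ptr0) is scaled by its softmax weight (in_ptr1) and the four heads are
# summed, i.e. (x * v).sum(dim=1).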
@triton.jit
def triton_poi_fused_mul_sum_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
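# Fused residual add + LayerNorm statistics: per-row mean and biased variance
# of fc(q) + residual over the last dimension (size 4).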
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
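# Applies the first LayerNorm: recomputes x = fc(q) + residual, then
# (x - mean) * rsqrt(var + 1e-06) * weight + bias.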
@triton.jit
def triton_poi_fused_add_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
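# Fused w_1 bias add + ReLU (in place on buf20); also stores the (out <= 0)
# mask consumed by the threshold backward.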
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
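# Fused w_2 bias add plus the position-wise FFN residual:
# buf22 = mm_out + bias + buf18.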
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
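# Second LayerNorm statistics: stores the per-row mean and, unlike kernel 9,
# the precomputed rsqrt(var + 1e-06).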
@triton.jit
def triton_poi_fused_native_layer_norm_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
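# Applies the second LayerNorm affine transform:
# (x - mean) * inv_std * weight + bias.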
@triton.jit
def triton_poi_fused_native_layer_norm_14(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_mean_5[grid(16)](buf9, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_6
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_7[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf12
triton_poi_fused_mul_sum_8[grid(64)](buf9, buf13, buf14, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(16)](buf15, primals_1,
buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_10[grid(64)](buf15,
primals_1, buf16, buf17, primals_8, primals_9, buf18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf20,
primals_11, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_12[grid(64)](buf22, primals_13, buf18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
buf23 = buf17
del buf17
buf24 = buf16
del buf16
triton_poi_fused_native_layer_norm_13[grid(16)](buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_14[grid(64)](buf22, buf23, buf24,
primals_14, primals_15, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf23
del buf24
del primals_15
return (buf25, buf7, primals_1, primals_8, primals_14, buf7, buf9,
reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf11,
reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15,
reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(
buf20, (16, 4), (4, 1), 0), buf22, primals_12, buf26, primals_10,
primals_7, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayerNew(nn.Module):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.spa.sk.weight
primals_6 = self.slf_attn.spa.sk.bias
primals_7 = self.slf_attn.fc.weight
primals_8 = self.slf_attn.layer_norm.weight
primals_9 = self.slf_attn.layer_norm.bias
primals_10 = self.pos_ffn.w_1.weight
primals_11 = self.pos_ffn.w_1.bias
primals_12 = self.pos_ffn.w_2.weight
primals_13 = self.pos_ffn.w_2.bias
primals_14 = self.pos_ffn.layer_norm.weight
primals_15 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1]
| WOMMOW/AIT | EncoderLayer | false | 1,229 | [
"MIT"
] | 0 | 305fe7962bf9c5c24b6854e3ff0b7e2e669bf5a5 | https://github.com/WOMMOW/AIT/tree/305fe7962bf9c5c24b6854e3ff0b7e2e669bf5a5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class Model(nn.Module):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.slf_attn = MultiHeadAttention(n_he
# ... truncated (>4000 chars) for memory efficiency |
Mean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/k7/ck7f3e36x4bp7ysaeucdkbkabvflugky7lt72frthtbqzwdsmcfq.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [4]), kwargs = {})
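# The reduced dimension has a static size of 4, so the kernel below unrolls
# the reduction into four scalar loads and a division by 4.0 rather than
# emitting a tl.sum; it is equivalent to input.mean(dim=4) in eager mode.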
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Mean(nn.Module):
def __init__(self, dim, keep_dim=False):
super(Mean, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MeanNew(nn.Module):
def __init__(self, dim, keep_dim=False):
super(MeanNew, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
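# A minimal self-check sketch (an assumption, not part of the generated
# module): on a CUDA device the fused kernel should match eager torch.mean.
if __name__ == "__main__":
    m = MeanNew(dim=4)
    x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(m(x), x.mean(4))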
| WillyChen123/CDFNet | Mean | false | 1,230 | [
"MIT"
] | 0 | 12d6b288aa2a8301683395a75bd44a7be44b7f2a | https://github.com/WillyChen123/CDFNet/tree/12d6b288aa2a8301683395a75bd44a7be44b7f2a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim, keep_dim=False):
super().__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CLeakyReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/n5/cn53c6d36bm2o6wr33epyebwkqx7owzyf77kp5pts3jxdcj6obrf.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {})
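# i.e. the elementwise map where(x > 0, x, 0.01 * x), using the default
# LeakyReLU negative slope of 0.01.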
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CLeakyReLU(nn.LeakyReLU):
def forward(self, xr, xi):
return F.leaky_relu(xr, self.negative_slope, self.inplace
), F.leaky_relu(xi, self.negative_slope, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](arg1_1, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
return buf0, buf1
class CLeakyReLUNew(nn.LeakyReLU):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
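# A minimal self-check sketch (an assumption, not part of the generated
# module): each fused kernel should match eager leaky_relu on its input.
if __name__ == "__main__":
    xr = torch.rand(4, 4, 4, 4, device='cuda')
    xi = torch.rand(4, 4, 4, 4, device='cuda')
    yr, yi = CLeakyReLUNew()(xr, xi)
    torch.testing.assert_close(yr, torch.nn.functional.leaky_relu(xr, 0.01))
    torch.testing.assert_close(yi, torch.nn.functional.leaky_relu(xi, 0.01))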
| X-CCS/TensorFlowTTS | CLeakyReLU | false | 1,231 | [
"Apache-2.0"
] | 0 | 157fab4cbcf11a68ff62f6ec364af43447247c76 | https://github.com/X-CCS/TensorFlowTTS/tree/157fab4cbcf11a68ff62f6ec364af43447247c76 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.LeakyReLU):
def forward(self, xr, xi):
return F.leaky_relu(xr, self.negative_slope, self.inplace
), F.leaky_relu(xi, self.negative_slope, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
KL_Triplet_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2k/c2krb47vwrdba4ls4nroyivzpywor6nfyjzvpahzquvj3x5efyft.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean]
# Source node to ATen node mapping:
# loss => eq, full_default, full_default_1, isnan, log, mean, mul, mul_1, sub, where, where_1
# Graph fragment:
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%view_1,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %log), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
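# In eager terms the decomposition is kl = (torch.xlogy(y, y) - y * x).mean(),
# i.e. nn.KLDivLoss with the default 'mean' reduction applied to input x and
# target y, with xlogy supplying the 0 * log(0) = 0 and NaN-propagation cases.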
triton_per_fused_mean_mul_sub_xlogy_0 = async_compile.triton('triton_per_fused_mean_mul_sub_xlogy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sub_xlogy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float("nan")
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_sub_xlogy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class KL_Triplet_Loss(nn.Module):
def __init__(self, symmetric=True):
"""
:param symmetric: if symmetric, JS Divergence will be used; if not, KL Divergence will be used.
"""
super().__init__()
self.symmetric = symmetric
self.engine = nn.KLDivLoss()
def forward(self, x, y):
if len(x.shape) == 4 and len(y.shape) == 4:
x = x.view(x.size(0) * x.size(1), -1)
y = y.view(y.size(0) * y.size(1), -1)
elif len(x.shape) == 2 and len(y.shape) == 2:
pass
else:
raise TypeError('We need a tensor of either rank 2 or rank 4.')
if self.symmetric:
loss = self.engine(x, y)
else:
loss = self.engine(x, y) + self.engine(y, x)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float('nan')
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_sub_xlogy_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KL_Triplet_LossNew(nn.Module):
def __init__(self, symmetric=True):
"""
:param symmetric: if symmetric, JS Divergence will be used; if not, KL Divergence will be used.
"""
super().__init__()
self.symmetric = symmetric
self.engine = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
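# A minimal self-check sketch (an assumption, not part of the generated
# module): the fused kernel should reproduce the eager KLDivLoss it was
# traced from.
if __name__ == "__main__":
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    loss = KL_Triplet_LossNew()(x, y)
    torch.testing.assert_close(loss, nn.KLDivLoss(reduction='mean')(x, y))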
| WorksApplications/omni_torch | KL_Triplet_Loss | false | 1,232 | [
"Apache-2.0"
] | 0 | 10b689d794c8f485e38c765303ef018da17bc641 | https://github.com/WorksApplications/omni_torch/tree/10b689d794c8f485e38c765303ef018da17bc641 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, symmetric=True):
"""
:param symmetric: if symmetric, JS Divergence will be used; if not, KL Divergence will be used.
"""
super().__init__()
self.symmetric = symmetric
self.engine = nn.KLDivLoss()
def forward(self, x, y):
if len(x.shape) == 4 and len(y.shape) == 4:
x = x.view(x.size(0) * x.size(1), -1)
y = y.view(y.size(0) * y.size(1), -1)
elif len(x.shape) == 2 and len(y.shape) == 2:
pass
else:
raise TypeError('We need a tensor of either rank 2 or rank 4.')
if self.symmetric:
loss = self.engine(x, y)
else:
loss = self.engine(x, y) + self.engine(y, x)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ue/cueewzxuy4vxtpvu3vvsk7dj77iejxhffcljrvuqynhlke7j7x72.py
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
# Source node to ATen node mapping:
# attn => clone
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 2.0), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_0 = async_compile.triton('triton_poi_fused_clone_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
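# Reference sketch: the kernel above materializes the head transpose of q
# (view (B, L, H, D) -> (B, H, L, D)) fused with the 1/sqrt(d_k) attention
# scaling (d_k = 4, hence the 0.5 factor). Illustrative eager form, helper
# name is an assumption:
def scale_and_transpose_q(q):
    # q: (batch, len, n_head, d_k) -> (batch, n_head, len, d_k), scaled
    return (q.transpose(1, 2) / 2.0).contiguous()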
# kernel path: runs/run_shard_6/inductor_cache/ri/cricgdtr5c24l63g746gjtdd45qor3pkzmi7qmyygyd24ejrijb7.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
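# Reference sketch: kernels _softmax_2 and _softmax_3 above implement a
# numerically stable softmax split into two passes (subtract the row max and
# exponentiate, then normalize by the row sum). Illustrative eager form over
# the last dim:
def softmax_reference(x):
    shifted = x - x.amax(dim=-1, keepdim=True)  # pass 1: max-subtract
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)  # pass 2: normalize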
# kernel path: runs/run_shard_6/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jy/cjygxb2zqcocru7bqmlieffqzoyzqxczpp5w5pv3hrnjzzu6tq65.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool1d => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%unsqueeze, [-1, -2], True), kwargs = {})
triton_poi_fused_mean_5 = async_compile.triton('triton_poi_fused_mean_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp20 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp24 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp28 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
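# Reference sketch: per the source-node comment, adaptive_avg_pool1d with
# output size 1 lowers to an aten.mean over the pooled window. Illustrative
# eager form for a (B, C, L) input (the runtime layout is an assumption):
def adaptive_pool_reference(x):
    return torch.nn.functional.adaptive_avg_pool1d(x, 1)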
# kernel path: runs/run_shard_6/inductor_cache/x3/cx357lgsa552jxzo7u6zzmeilbrh23o64hz7xr6jyzewgxsocd7h.py
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# v_4 => amax_1, exp_1, sub_1
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_16, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_16, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mu/cmuwbmg3ea5rpxshdtybg3pou76wgwsthvalkm6sfme6zednfanp.py
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# v_4 => div_2, sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {})
triton_poi_fused__softmax_7 = async_compile.triton('triton_poi_fused__softmax_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/a2/ca2efstpjdcv4e2tpybutvkyj7iakalaszbd2jsdzr4g2kbpjaka.py
# Topologically Sorted Source Nodes: [f, q_2], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# f => mul
# q_2 => sum_4
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_14, %expand_4), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
triton_poi_fused_mul_sum_8 = async_compile.triton('triton_poi_fused_mul_sum_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
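# Reference sketch: the kernel fuses the element-wise product of the value
# tensor with the (expanded) softmax weights and the reduction over dim=1,
# per the source-node comment [f, q_2]. Illustrative eager form:
def weighted_sum_reference(v, attn):
    # assumes attn is shaped to broadcast against v along the head dim (dim=1),
    # mirroring the expand in the source graph
    return (v * attn).sum(dim=1, keepdim=True)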
# kernel path: runs/run_shard_6/inductor_cache/cd/ccdmi4jlrnqerq7lq32jasuhlfdtt4lbo4wdtgtm25hvaqmsmmzd.py
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add
# q_6 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/l2/cl2chp3c7xhcfuz6tuqw7ows2xquvgtlltaljzy5xnnjpkxrnzbj.py
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add
# q_6 => add_1, add_2, mul_1, mul_2, rsqrt, sub_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_8), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_10 = async_compile.triton('triton_poi_fused_add_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
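# Reference sketch: kernels _9 and _10 above fuse the residual add with
# layer normalization (eps = 1e-06) and its affine transform. Illustrative
# eager form:
def add_layer_norm_reference(x, residual, weight, bias):
    return torch.nn.functional.layer_norm(
        x + residual, (x.shape[-1],), weight, bias, eps=1e-06)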
# kernel path: runs/run_shard_6/inductor_cache/lk/clkhemk6ohccddvynxghfpav5gis3lty2djzmbxeav7np4a6f65z.py
# Topologically Sorted Source Nodes: [q_12], Original ATen: [aten.add]
# Source node to ATen node mapping:
# q_12 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_39, %add_2), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ny/cnyrovglbw3nyhttef3ipiiitcatr5iwqeg7y3cebawjg26ucer5.py
# Topologically Sorted Source Nodes: [q_13], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_13 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_12 = async_compile.triton('triton_poi_fused_native_layer_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/oo/cooyneqtg4bnijp62rdzv4cyndryh7kceyhls22gkckqxzmgx4ye.py
# Topologically Sorted Source Nodes: [q_13], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_13 => add_4, add_5, mul_4, mul_5, rsqrt_1, sub_5, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %rsqrt_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_17), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_18), kwargs = {})
triton_poi_fused_native_layer_norm_13 = async_compile.triton('triton_poi_fused_native_layer_norm_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/aw/cawklfxsta65lswerzf7yv2ha7ywva5wez4kjiiwjwvkzrwph3qz.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_41,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
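# Reference sketch: the kernel fuses the bias add with ReLU and also emits
# the `<= 0` mask later consumed by threshold_backward in the backward pass.
# Illustrative eager form:
def relu_with_backward_mask(x, bias):
    out = torch.relu(x + bias)
    return out, out <= 0  # boolean mask of the zeroed activations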
# kernel path: runs/run_shard_6/inductor_cache/75/c75zatca54ivfmbwvxvmzhmwqww2cguxqahm6wtfcv6nfwu4biij.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_6
# Graph fragment:
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_43, %add_5), kwargs = {})
triton_poi_fused_add_15 = async_compile.triton('triton_poi_fused_add_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_15(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (16, 4), (4, 1))
assert_size_stride(primals_13, (16, 4), (4, 1))
assert_size_stride(primals_14, (16, 4), (4, 1))
assert_size_stride(primals_15, (16, ), (1, ))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, 4), (4, 1))
assert_size_stride(primals_22, (4, ), (1, ))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_div_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean]
triton_poi_fused_mean_5.run(buf9, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_6
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = reinterpret_tensor(buf12, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [f, q_2], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_8.run(buf9, buf13, buf14, 64, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(buf15, primals_1, buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_5, q_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_10.run(buf15, primals_1, buf16, buf17, primals_8, primals_9, buf18, 64, grid=grid(64), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 16), (1, 4), 0), out=buf20)
del primals_12
buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 16), (1, 4), 0), out=buf21)
del primals_13
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv_1, attn_2], Original ATen: [aten.div, aten.clone]
triton_poi_fused_clone_div_0.run(buf19, buf22, 256, grid=grid(256), stream=stream0)
buf23 = reinterpret_tensor(buf19, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf20, buf23, 64, 4, grid=grid(64, 4), stream=stream0)
buf24 = reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf24, buf25, 256, grid=grid(256), stream=stream0)
buf26 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf25, buf26, 256, grid=grid(256), stream=stream0)
buf27 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf21, buf27, 256, grid=grid(256), stream=stream0)
buf28 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), out=buf28)
buf29 = reinterpret_tensor(buf17, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [adaptive_avg_pool1d_1], Original ATen: [aten.mean]
triton_poi_fused_mean_5.run(buf28, buf29, 16, grid=grid(16), stream=stream0)
buf30 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf29, (4, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf30)
del primals_15
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_10], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf30, buf31, 64, grid=grid(64), stream=stream0)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_10], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf31, buf32, 64, grid=grid(64), stream=stream0)
buf33 = reinterpret_tensor(buf31, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf31 # reuse
# Topologically Sorted Source Nodes: [f_1, q_9], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_8.run(buf28, buf32, buf33, 64, grid=grid(64), stream=stream0)
buf34 = reinterpret_tensor(buf32, (16, 4), (4, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf33, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [q_12], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf35, buf18, 64, grid=grid(64), stream=stream0)
buf36 = buf16; del buf16 # reuse
buf37 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q_13], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_12.run(buf35, buf36, buf37, 16, grid=grid(16), stream=stream0)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_13], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_13.run(buf35, buf36, buf37, primals_17, primals_18, buf38, 64, grid=grid(64), stream=stream0)
del primals_18
buf39 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf38, (16, 4), (4, 1), 0), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf39)
buf40 = reinterpret_tensor(buf39, (4, 4, 4), (16, 4, 1), 0); del buf39 # reuse
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_14.run(buf40, primals_20, buf46, 64, grid=grid(64), stream=stream0)
del primals_20
buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf40, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf41)
buf42 = reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0); del buf41 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_15.run(buf42, primals_22, buf38, 64, grid=grid(64), stream=stream0)
del primals_22
buf43 = buf37; del buf37 # reuse
buf44 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_12.run(buf42, buf43, buf44, 16, grid=grid(16), stream=stream0)
buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_13.run(buf42, buf43, buf44, primals_23, primals_24, buf45, 64, grid=grid(64), stream=stream0)
del buf43
del buf44
del primals_24
return (buf45, buf7, buf26, primals_1, primals_8, primals_17, primals_23, buf7, buf9, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf26, buf28, reinterpret_tensor(buf29, (4, 4), (4, 1), 0), buf30, reinterpret_tensor(buf33, (16, 4), (4, 1), 0), buf35, reinterpret_tensor(buf38, (16, 4), (4, 1), 0), reinterpret_tensor(buf40, (16, 4), (4, 1), 0), buf42, primals_21, buf46, primals_19, primals_16, primals_14, reinterpret_tensor(buf27, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf22, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 1, 4), 0), primals_11, primals_7, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
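# Hedged shape sketch (not part of the original module; sizes are illustrative):
# SPA gates each head by a softmax weight, so the output shape matches the input.
def _spa_shape_demo():
    spa = SPA(n_head=4, d_v=4)
    x = torch.rand(2, 4, 3, 4)  # (bs, n_head, lq, d_v)
    f = spa(x)
    assert f.shape == x.shape  # gating preserves the input shape
    return f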
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
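# Hedged usage sketch (illustrative sizes, not part of the original module):
def _scaled_dot_product_attention_demo():
    attn_fn = ScaledDotProductAttention(temperature=4 ** 0.5)
    q = k = v = torch.rand(2, 4, 3, 4)  # (bs, n_head, len, d_k)
    output, attn = attn_fn(q, k, v)
    # attn is (2, 4, 3, 3); each row sums to 1 before dropout is applied
    return output.shape, attn.shape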
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
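# Hedged usage sketch (illustrative sizes): with n_head > 1 the SPA branch fuses
# the heads back down to d_v features, so the output keeps the (bs, len, d_model) shape.
def _multi_head_attention_demo():
    mha = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4)
    q = k = v = torch.rand(2, 3, 4)  # (bs, len, d_model)
    out, attn = mha(q, k, v)
    return out.shape, attn.shape  # (2, 3, 4) and (2, 4, 3, 3)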
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class DecoderLayer(nn.Module):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, dec_input, enc_output, slf_attn_mask=None,
dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input,
dec_input, mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output,
enc_output, mask=dec_enc_attn_mask)
dec_output = self.pos_ffn(dec_output)
return dec_output, dec_slf_attn, dec_enc_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
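# Hedged smoke test (follows the get_inputs/get_init_inputs convention above):
def _decoder_layer_demo():
    init_args, init_kwargs = get_init_inputs()
    layer = DecoderLayer(*init_args, **init_kwargs)
    dec_output, dec_slf_attn, dec_enc_attn = layer(*get_inputs())
    return dec_output.shape  # torch.Size([4, 4, 4])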
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
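    # Fuses q / temperature (temperature = d_k ** 0.5 = 2, hence the 0.5 factor)
    # with the copy into (b, h, lq, d_k) layout for the q @ k^T bmm.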
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
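    # Copies k into (b, h, d_k, lk) layout, i.e. the k.transpose(2, 3) operand of q @ k^T.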
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
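    # Softmax over the last dim (rows of length 4), pass 1: subtract the row max and exponentiate.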
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
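    # Softmax pass 2: divide each exponentiated entry by its row sum.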
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
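    # Copies v into contiguous (b, h, lv, d_v) layout for the attn @ v bmm.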
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
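    # SPA pooling: fuses u = x.sum(dim=1) over the 4 heads with the adaptive
    # average pool over the 4 sequence positions (hence the final divide by 4.0).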
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
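    # SPA softmax over the head dimension (dim=1 of the (bs, n_head, d_v) view),
    # pass 1: subtract the max across heads and exponentiate.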
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
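    # SPA softmax over the head dimension, pass 2: normalize by the sum across heads.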
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
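    # SPA gating plus head reduction: f = x * v (v broadcast over lq), then sum over the 4 heads.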
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
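    # LayerNorm statistics of (attention output + residual): per-row mean and biased variance.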
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
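    # LayerNorm apply: ((x + residual) - mean) * rsqrt(var + 1e-06) * weight + bias.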
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
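    # In-place elementwise residual add: in_out_ptr0 += in_ptr0.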
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
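    # LayerNorm statistics: per-row mean and rsqrt(variance + 1e-06).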
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
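    # LayerNorm apply using precomputed mean and rstd: (x - mean) * rstd * weight + bias.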
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
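    # Fused bias add + ReLU; also stores the (output <= 0) mask reused by the backward threshold.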
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_15(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
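    # FFN epilogue: w_2 output + bias + residual (the pre-FFN activations).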
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (16, 4), (4, 1))
assert_size_stride(primals_13, (16, 4), (4, 1))
assert_size_stride(primals_14, (16, 4), (4, 1))
assert_size_stride(primals_15, (16,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 4), (4, 1))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_mean_5[grid(16)](buf9, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_6
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_7[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf12
triton_poi_fused_mul_sum_8[grid(64)](buf9, buf13, buf14, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(16)](buf15, primals_1,
buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_10[grid(64)](buf15,
primals_1, buf16, buf17, primals_8, primals_9, buf18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_12, (4, 16), (1, 4), 0), out=buf20)
del primals_12
buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_13, (4, 16), (1, 4), 0), out=buf21)
del primals_13
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_0[grid(256)](buf19, buf22, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf23 = reinterpret_tensor(buf19, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf19
triton_poi_fused_clone_1[grid(64, 4)](buf20, buf23, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0)
del buf20
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24
)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf24, buf25, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf26 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused__softmax_3[grid(256)](buf25, buf26, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf27 = buf25
del buf25
triton_poi_fused_clone_4[grid(256)](buf21, buf27, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf28 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0)
del buf21
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), out=buf28
)
buf29 = reinterpret_tensor(buf17, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf17
triton_poi_fused_mean_5[grid(16)](buf28, buf29, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf29, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf30)
del primals_15
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(64)](buf30, buf31, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_7[grid(64)](buf31, buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf31, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf31
triton_poi_fused_mul_sum_8[grid(64)](buf28, buf32, buf33, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf34 = reinterpret_tensor(buf32, (16, 4), (4, 1), 0)
del buf32
extern_kernels.mm(reinterpret_tensor(buf33, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0)
del buf34
triton_poi_fused_add_11[grid(64)](buf35, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf36 = buf16
del buf16
buf37 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_12[grid(16)](buf35, buf36, buf37,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_13[grid(64)](buf35, buf36, buf37,
primals_17, primals_18, buf38, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_18
buf39 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf38, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf39)
buf40 = reinterpret_tensor(buf39, (4, 4, 4), (16, 4, 1), 0)
del buf39
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(64)](buf40,
primals_20, buf46, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf40, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf41)
buf42 = reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0)
del buf41
triton_poi_fused_add_15[grid(64)](buf42, primals_22, buf38, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_22
buf43 = buf37
del buf37
buf44 = buf36
del buf36
triton_poi_fused_native_layer_norm_12[grid(16)](buf42, buf43, buf44,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_13[grid(64)](buf42, buf43, buf44,
primals_23, primals_24, buf45, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf43
del buf44
del primals_24
return (buf45, buf7, buf26, primals_1, primals_8, primals_17,
primals_23, buf7, buf9, reinterpret_tensor(buf10, (4, 4), (4, 1), 0
), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15,
reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_10, (16, 4), (4, 1), 0), buf26, buf28, reinterpret_tensor(
buf29, (4, 4), (4, 1), 0), buf30, reinterpret_tensor(buf33, (16, 4),
(4, 1), 0), buf35, reinterpret_tensor(buf38, (16, 4), (4, 1), 0),
reinterpret_tensor(buf40, (16, 4), (4, 1), 0), buf42, primals_21,
buf46, primals_19, primals_16, primals_14, reinterpret_tensor(buf27,
(16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf22, (16, 4, 4), (
16, 1, 4), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 1, 4), 0),
primals_11, primals_7, primals_5, reinterpret_tensor(buf8, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class DecoderLayerNew(nn.Module):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0, input_1):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.spa.sk.weight
primals_6 = self.slf_attn.spa.sk.bias
primals_7 = self.slf_attn.fc.weight
primals_8 = self.slf_attn.layer_norm.weight
primals_9 = self.slf_attn.layer_norm.bias
primals_11 = self.enc_attn.w_qs.weight
primals_12 = self.enc_attn.w_ks.weight
primals_13 = self.enc_attn.w_vs.weight
primals_14 = self.enc_attn.spa.sk.weight
primals_15 = self.enc_attn.spa.sk.bias
primals_16 = self.enc_attn.fc.weight
primals_17 = self.enc_attn.layer_norm.weight
primals_18 = self.enc_attn.layer_norm.bias
primals_19 = self.pos_ffn.w_1.weight
primals_20 = self.pos_ffn.w_1.bias
primals_21 = self.pos_ffn.w_2.weight
primals_22 = self.pos_ffn.w_2.bias
primals_23 = self.pos_ffn.layer_norm.weight
primals_24 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24])
return output[0], output[1], output[2]
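# Hedged usage sketch (assumes a CUDA device; the Inductor kernels above target cuda:0):
def _decoder_layer_new_demo():
    layer = DecoderLayerNew(d_model=4, d_inner=4, n_head=4, d_k=4, d_v=4).cuda()
    dec_input = torch.rand(4, 4, 4, device='cuda')
    enc_output = torch.rand(4, 4, 4, device='cuda')
    dec_output, dec_slf_attn, dec_enc_attn = layer(dec_input, enc_output)
    return dec_output.shape  # torch.Size([4, 4, 4])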
| WOMMOW/AIT | DecoderLayer | false | 1,233 | [
"MIT"
] | 0 | 305fe7962bf9c5c24b6854e3ff0b7e2e669bf5a5 | https://github.com/WOMMOW/AIT/tree/305fe7962bf9c5c24b6854e3ff0b7e2e669bf5a5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SPA(nn.Module):
""" Selective parallel attention """
def __init__(self, n_head: 'int'=8, d_v: 'int'=64):
super().__init__()
self.gap = nn.AdaptiveAvgPool1d(1)
self.sk = nn.Linear(d_v, n_head * d_v)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
bs, n_head, _lq, d_v = x.size()
u = x.sum(dim=1)
s = self.gap(u.transpose(1, 2)).view(bs, d_v)
v = self.sk(s)
v = v.view(bs, n_head, d_v)
v = self.softmax(v)
v = v.unsqueeze(2)
f = x * v.expand_as(x)
return f
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature: 'float', attn_dropout: 'float'=0.1):
super().__init__()
self.temperature = temperature
self.attn_dropout = attn_dropout
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def __repr__(self):
tmpstr = self.__class__.__name__ + '('
tmpstr += 'temperature=' + str(self.temperature)
tmpstr += ', attn_dropout=' + str(self.attn_dropout)
tmpstr += ')'
return tmpstr
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head: 'int'=8, d_model: 'int'=512, d_k: 'int'=64,
d_v: 'int'=64, dropout: 'float'=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
if n_head > 1:
self.spa = SPA(n_head=n_head, d_v=d_v)
self.fc = nn.Linear(d_v, d_model, bias=False)
else:
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
if n_head > 1:
q = self.spa(q)
q = q.sum(dim=1, keepdim=True)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class Model(nn.Module):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.slf_attn = MultiHeadAttention(n_
# ... truncated (>4000 chars) for memory efficiency |
DocumentTopicDecoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py
# Topologically Sorted Source Nodes: [topic_dist_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# topic_dist_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py
# Topologically Sorted Source Nodes: [topic_dist_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# topic_dist_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (12, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf1)
del primals_4
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten._thnn_fused_gru_cell]
buf2 = torch.ops.aten._thnn_fused_gru_cell.default(buf0, buf1, primals_2, primals_5, primals_6)
del buf0
del buf1
del primals_5
del primals_6
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [topic_dist], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [topic_dist_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [topic_dist_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
del buf6
return (buf3, buf7, primals_1, primals_2, buf3, buf4, buf7, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class DocumentTopicDecoder(nn.Module):
def __init__(self, dim_h, num_topics):
super(DocumentTopicDecoder, self).__init__()
self.decoder = nn.GRUCell(input_size=dim_h, hidden_size=dim_h)
self.out_linear = nn.Linear(dim_h, num_topics)
self.softmax = nn.Softmax(dim=1)
def forward(self, input, hidden):
"""
Args:
- input (bsz, dim_h)
- hidden (bsz, dim_h)
- avail_topic_mask (bsz, num_topics)
Return:
- hidden_out (bsz, dim_h) : hidden state of this step
            - topic_dist (bsz, num_topics) : probability distribution of the next sentence over topics
"""
hidden_out = self.decoder(input, hidden)
topic_dist = self.out_linear(hidden_out)
topic_dist = self.softmax(topic_dist)
return hidden_out, topic_dist
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim_h': 4, 'num_topics': 4}]
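# Hedged smoke test (follows the get_inputs/get_init_inputs convention above):
def _document_topic_decoder_demo():
    init_args, init_kwargs = get_init_inputs()
    decoder = DocumentTopicDecoder(*init_args, **init_kwargs)
    hidden_out, topic_dist = decoder(*get_inputs())
    # topic_dist rows sum to 1 after the softmax
    return hidden_out.shape, topic_dist.shape  # both torch.Size([4, 4])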
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.multiprocessing
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
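    # Row-wise softmax over the 4 topics, pass 1: subtract the row max and exponentiate.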
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
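    # Row-wise softmax pass 2: normalize by the row sum.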
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 12),
(1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_4, (4, 12),
(1, 4), 0), out=buf1)
del primals_4
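        # Fused GRU cell: buf3 is the new hidden state; buf4 is the workspace tensor kept for backward.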
buf2 = torch.ops.aten._thnn_fused_gru_cell.default(buf0, buf1,
primals_2, primals_5, primals_6)
del buf0
del buf1
del primals_5
del primals_6
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_1[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf6
return buf3, buf7, primals_1, primals_2, buf3, buf4, buf7, primals_7
class DocumentTopicDecoderNew(nn.Module):
def __init__(self, dim_h, num_topics):
super(DocumentTopicDecoderNew, self).__init__()
self.decoder = nn.GRUCell(input_size=dim_h, hidden_size=dim_h)
self.out_linear = nn.Linear(dim_h, num_topics)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_3 = self.decoder.weight_ih
primals_4 = self.decoder.weight_hh
primals_5 = self.decoder.bias_ih
primals_6 = self.decoder.bias_hh
primals_1 = self.out_linear.weight
primals_8 = self.out_linear.bias
primals_2 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
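# Hedged usage sketch (assumes a CUDA device, since the kernels above target cuda:0):
def _document_topic_decoder_new_demo():
    decoder = DocumentTopicDecoderNew(dim_h=4, num_topics=4).cuda()
    inp = torch.rand(4, 4, device='cuda')
    hidden = torch.rand(4, 4, device='cuda')
    hidden_out, topic_dist = decoder(inp, hidden)
    return hidden_out.shape, topic_dist.shape  # both torch.Size([4, 4])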
| WuDiDaBinGe/TAKG | DocumentTopicDecoder | false | 1,234 | [
"MIT"
] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim_h, num_topics):
super().__init__()
self.decoder = nn.GRUCell(input_size=dim_h, hidden_size=dim_h)
self.out_linear = nn.Linear(dim_h, num_topics)
self.softmax = nn.Softmax(dim=1)
def forward(self, input, hidden):
"""
Args:
- input (bsz, dim_h)
- hidden (bsz, dim_h)
- avail_topic_mask (bsz, num_topics)
Return:
- hidden_out (bsz, dim_h) : hidden state of this step
- topic_dist (bsz, num_topics) : probability distribution of the next sentence over topics
"""
hidden_out = self.decoder(input, hidden)
topic_dist = self.out_linear(hidden_out)
topic_dist = self.softmax(topic_dist)
return hidden_out, topic_dist
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
LinearMultiplicationComposition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/p3/cp3qleddjiuuytozrtebx5pzf2ycpwtw4mkq2jsx7qqswymv2bm6.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearMultiplicationComposition(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear_1(x) * self.linear_2(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'representation_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
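# Elementwise product of the two linear-layer outputs over all 256 = 4*4*4*4 elements.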
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
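# linear_1(x) and linear_2(y) computed as addmm on inputs flattened to (64, 4); the product is fused in the Triton kernel below.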
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearMultiplicationCompositionNew(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, input_0, input_1):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| XeniaOhmer/SystematicRepresentations | LinearMultiplicationComposition | false | 1,235 | [
"MIT"
] | 0 | 825208d1be659dc820e61f577cdb53afc47302f4 | https://github.com/XeniaOhmer/SystematicRepresentations/tree/825208d1be659dc820e61f577cdb53afc47302f4 | import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class Model(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear_1(x) * self.linear_2(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LinearComposition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearComposition(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear = torch.nn.Linear(representation_size * 2,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear(torch.cat((x, y), dim=1))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'representation_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
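# Concatenate along dim=1: columns 0-3 come from in_ptr0 (x), columns 4-7 from in_ptr1 (y).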
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return buf1, buf0
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearCompositionNew(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear = torch.nn.Linear(representation_size * 2,
representation_size)
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| XeniaOhmer/SystematicRepresentations | LinearComposition | false | 1,236 | [
"MIT"
] | 0 | 825208d1be659dc820e61f577cdb53afc47302f4 | https://github.com/XeniaOhmer/SystematicRepresentations/tree/825208d1be659dc820e61f577cdb53afc47302f4 | import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class Model(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear = torch.nn.Linear(representation_size * 2,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear(torch.cat((x, y), dim=1))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
LinearAdditionComposition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/hk/chkw3kgvyjknvy3tuin72elihifeho7w4nt6ifpydj2ebu2nju26.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf2, primals_2, buf1, primals_5, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_5
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearAdditionComposition(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear_1(x) + self.linear_2(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'representation_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
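# Fused epilogue: add each linear bias to its matmul output, then sum the two results in place.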
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf2, primals_2, buf1, primals_5,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_5
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0)
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class LinearAdditionCompositionNew(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, input_0, input_1):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| XeniaOhmer/SystematicRepresentations | LinearAdditionComposition | false | 1,237 | [
"MIT"
] | 0 | 825208d1be659dc820e61f577cdb53afc47302f4 | https://github.com/XeniaOhmer/SystematicRepresentations/tree/825208d1be659dc820e61f577cdb53afc47302f4 | import torch
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class CompositionFunction(torch.nn.Module):
def __init__(self, representation_size: 'int'):
super().__init__()
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
class Model(CompositionFunction):
def __init__(self, representation_size: 'int'):
super().__init__(representation_size)
self.linear_1 = torch.nn.Linear(representation_size,
representation_size)
self.linear_2 = torch.nn.Linear(representation_size,
representation_size)
def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
return self.linear_1(x) + self.linear_2(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SingleGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [context_input], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# context_input => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7l/c7lohrkrpc6lubqaxmbz5s7ens3afnmzezzbwcsy4ow5su3gdhpb.py
# Topologically Sorted Source Nodes: [context_gate, mul, sub, mul_1, context_fusion], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# context_fusion => add
# context_gate => sigmoid
# mul => mul
# mul_1 => mul_1
# sub => sub
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp6 = tl.load(in_ptr2 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 8), (8, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_input], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_gate, mul, sub, mul_1, context_fusion], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_1.run(buf2, primals_1, primals_2, buf3, 16, grid=grid(16), stream=stream0)
return (buf3, primals_1, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class SingleGate(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, source_vector, other_vector):
context_input = torch.cat((source_vector, other_vector), dim=1)
context_gate = self.sigmoid(self.fusion_linear(context_input))
context_fusion = context_gate * source_vector + (1.0 - context_gate
) * other_vector
return context_fusion
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'vector_dim': 4, 'topic_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.multiprocessing
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
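# Concatenate source_vector and other_vector along dim=1 to form the 8-wide gate input.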
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
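# Gated fusion: g = sigmoid(gate logit, broadcast over the row); out = g * source + (1 - g) * other.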
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 8), (8, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
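# Gate logit: build the concatenated input, then addmm(bias, cat, W^T) yields one scalar per row.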
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_1[grid(16)](buf2, primals_1,
primals_2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf3, primals_1, primals_2, buf0, buf2
class SingleGateNew(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_3 = self.fusion_linear.weight
primals_4 = self.fusion_linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| WuDiDaBinGe/TAKG | SingleGate | false | 1,238 | [
"MIT"
] | 0 | 83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31 | import torch
import torch.multiprocessing
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, vector_dim, topic_dim):
super().__init__()
assert vector_dim == topic_dim
self.fusion_linear = nn.Linear(vector_dim + topic_dim, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, source_vector, other_vector):
context_input = torch.cat((source_vector, other_vector), dim=1)
context_gate = self.sigmoid(self.fusion_linear(context_input))
context_fusion = context_gate * source_vector + (1.0 - context_gate
) * other_vector
return context_fusion
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
AvgReadout | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2c/c2caasuan6xkydnq2bvliamlyid6cl5fcz5kcz2mnyns45wtxqbs.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AvgReadout(nn.Module):
"""
Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices
.. math::
\\begin{equation}
\\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)}
\\end{equation}
"""
def __init__(self):
super(AvgReadout, self).__init__()
def forward(self, seq):
return torch.mean(seq, 0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
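# Mean over dim 0: the four (4, 4, 4) slices sit 64 floats apart; sum them and divide by 4.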
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgReadoutNew(nn.Module):
"""
Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices
.. math::
\\begin{equation}
\\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)}
\\end{equation}
"""
def __init__(self):
super(AvgReadoutNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Xinstein3033/OpenHGNN | AvgReadout | false | 1,239 | [
"Apache-2.0"
] | 0 | a9ca499834523419ecdaaa09e4b42f640486f262 | https://github.com/Xinstein3033/OpenHGNN/tree/a9ca499834523419ecdaaa09e4b42f640486f262 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices
.. math::
\\begin{equation}
\\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)}
\\end{equation}
"""
def __init__(self):
super().__init__()
def forward(self, seq):
return torch.mean(seq, 0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
F_fully_connected | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pd/cpdu37l3bj63bjibgjk2ueagf7o3e26iukuvw6axiaa2bjb2e6op.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
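# Fused epilogue for Linear + ReLU: adds the per-feature bias to the matmul
# result, applies ReLU in place, and also writes the (activation <= 0) mask
# that aten.threshold_backward uses to zero gradients in the backward pass.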
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 8), (8, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (8, 8), (8, 1))
assert_size_stride(primals_7, (8, ), (1, ))
assert_size_stride(primals_8, (4, 8), (8, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 512, grid=grid(512), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 8), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 512, grid=grid(512), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 8), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf4 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf7, 512, grid=grid(512), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6)
del primals_9
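# First element is the fc3 output reshaped back to the 4-D input shape; the
# rest are activations, weights, and ReLU masks saved for the backward pass.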
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(buf5, (64, 8), (8, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
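# rand_strided builds random CUDA tensors with exactly the shapes and strides that call() asserts on entry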
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class F_fully_connected(nn.Module):
"""Fully connected tranformation, not reversible, but used below."""
def __init__(self, size_in, size, internal_size=None, dropout=0.0):
super(F_fully_connected, self).__init__()
if not internal_size:
internal_size = 2 * size
self.d1 = nn.Dropout(p=dropout)
self.d2 = nn.Dropout(p=dropout)
self.d2b = nn.Dropout(p=dropout)
self.fc1 = nn.Linear(size_in, internal_size)
self.fc2 = nn.Linear(internal_size, internal_size)
self.fc2b = nn.Linear(internal_size, internal_size)
self.fc3 = nn.Linear(internal_size, size)
self.nl1 = nn.ReLU()
self.nl2 = nn.ReLU()
self.nl2b = nn.ReLU()
def forward(self, x):
out = self.nl1(self.d1(self.fc1(x)))
out = self.nl2(self.d2(self.fc2(out)))
out = self.nl2b(self.d2b(self.fc2b(out)))
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size_in': 4, 'size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
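# the bias vector is reused across all rows; evict_last hints it should stay cached while the activations stream through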
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 8), (8, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (8, 8), (8, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (4, 8), (8, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
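# Each hidden Linear runs as a bare mm; its bias add + ReLU (and the backward
# mask) are fused into the Triton kernel launched right after it. Only the
# final fc3 layer folds its bias into addmm directly.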
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_2, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 8), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf3,
primals_5, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_6, (8, 8), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf5,
primals_7, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 8), (8, 1), 0),
    reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0),
    reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
    reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
    reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
    reinterpret_tensor(buf5, (64, 8), (8, 1), 0),
    primals_8, buf7, primals_6, buf8, primals_4, buf9)
class F_fully_connectedNew(nn.Module):
"""Fully connected tranformation, not reversible, but used below."""
def __init__(self, size_in, size, internal_size=None, dropout=0.0):
super(F_fully_connectedNew, self).__init__()
if not internal_size:
internal_size = 2 * size
self.d1 = nn.Dropout(p=dropout)
self.d2 = nn.Dropout(p=dropout)
self.d2b = nn.Dropout(p=dropout)
self.fc1 = nn.Linear(size_in, internal_size)
self.fc2 = nn.Linear(internal_size, internal_size)
self.fc2b = nn.Linear(internal_size, internal_size)
self.fc3 = nn.Linear(internal_size, size)
self.nl1 = nn.ReLU()
self.nl2 = nn.ReLU()
self.nl2b = nn.ReLU()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc2b.weight
primals_7 = self.fc2b.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
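# Minimal smoke test (hypothetical, not part of the repo), assuming a CUDA device:
#   model = F_fully_connectedNew(size_in=4, size=4).cuda()
#   out = model(torch.rand(4, 4, 4, 4, device='cuda'))  # -> shape (4, 4, 4, 4)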
| Xenovortex/INN_Embedding_Classification | F_fully_connected | false | 1,240 | [
"MIT"
] | 0 | df31ec3dcf70780cae5140a69ffafdd64f218e5f | https://github.com/Xenovortex/INN_Embedding_Classification/tree/df31ec3dcf70780cae5140a69ffafdd64f218e5f | import torch
import torch.nn as nn
class Model(nn.Module):
"""Fully connected tranformation, not reversible, but used below."""
def __init__(self, size_in, size, internal_size=None, dropout=0.0):
super().__init__()
if not internal_size:
internal_size = 2 * size
self.d1 = nn.Dropout(p=dropout)
self.d2 = nn.Dropout(p=dropout)
self.d2b = nn.Dropout(p=dropout)
self.fc1 = nn.Linear(size_in, internal_size)
self.fc2 = nn.Linear(internal_size, internal_size)
self.fc2b = nn.Linear(internal_size, internal_size)
self.fc3 = nn.Linear(internal_size, size)
self.nl1 = nn.ReLU()
self.nl2 = nn.ReLU()
self.nl2b = nn.ReLU()
def forward(self, x):
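# three hidden blocks of Linear -> Dropout -> ReLU, then a plain Linear readout with no final nonlinearity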
out = self.nl1(self.d1(self.fc1(x)))
out = self.nl2(self.d2(self.fc2(out)))
out = self.nl2b(self.d2b(self.fc2b(out)))
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|